Example #1
def topology(request):
    """This fixture is used to standalone topology for the 'module'."""

    standalone = DirSrv(verbose=False)

    # Args for the standalone instance
    args_instance[SER_HOST] = HOST_STANDALONE
    args_instance[SER_PORT] = PORT_STANDALONE
    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
    args_standalone = args_instance.copy()
    standalone.allocate(args_standalone)

    # Check whether the instance already exists
    instance_standalone = standalone.exists()

    # Remove the instance
    if instance_standalone:
        standalone.delete()

    # Create the instance
    standalone.create()

    # Used to retrieve configuration information (dbdir, confdir...)
    standalone.open()

    # Delete the instance at the end
    def fin():
        standalone.delete()
    request.addfinalizer(fin)

    # Here we have standalone instance up and running
    return TopologyStandalone(standalone)
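These fixtures are consumed through pytest's fixture machinery and return a small topology holder. The surrounding boilerplate is not shown in the examples; a minimal sketch of it, assuming a module-scoped fixture and a trivial holder class (both assumptions, not taken from the example), looks like this:

import pytest


class TopologyStandalone(object):
    """Assumed minimal holder for the standalone instance."""
    def __init__(self, standalone):
        standalone.open()
        self.standalone = standalone


# The fixture above would normally be declared as:
#
#     @pytest.fixture(scope="module")
#     def topology(request):
#         ...
#
# and each test simply requests it by name:
def test_example(topology):
    # The holder exposes the running DirSrv instance to the test body.
    assert topology.standalone.exists()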
Example #2
def topology(request):
    """Create DS Deployment"""

    # Creating standalone instance ...
    if DEBUGGING:
        standalone = DirSrv(verbose=True)
    else:
        standalone = DirSrv(verbose=False)
    args_instance[SER_HOST] = HOST_STANDALONE
    args_instance[SER_PORT] = PORT_STANDALONE
    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_standalone = args_instance.copy()
    standalone.allocate(args_standalone)
    instance_standalone = standalone.exists()
    if instance_standalone:
        standalone.delete()
    standalone.create()
    standalone.open()

    def fin():
        """If we are debugging just stop the instances, otherwise remove
        them
        """
        if DEBUGGING:
            standalone.stop(60)
        else:
            standalone.delete()

    request.addfinalizer(fin)

    # Clear out the tmp dir
    standalone.clearTmpDir(__file__)

    return TopologyStandalone(standalone)
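Example #2 keys verbosity and teardown behaviour off a module-level DEBUGGING flag that is not shown. A common way to define it (an assumption here, not part of the example) is from an environment variable, which also lets the verbose if/else collapse into a single call, as Example #6 below does with DirSrv(verbose=DEBUGGING):

import os

from lib389 import DirSrv

# Assumption: DEBUGGING is driven by an environment variable (e.g. DEBUGGING=1)
# so a developer can keep the instance alive after a failed run.
DEBUGGING = bool(os.getenv('DEBUGGING', default=False))

standalone = DirSrv(verbose=DEBUGGING)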
Example #3
def topology(request):
    global installation1_prefix
    if installation1_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation1_prefix

    # Creating standalone instance ...
    standalone = DirSrv(verbose=False)
    args_instance[SER_HOST] = HOST_STANDALONE
    args_instance[SER_PORT] = PORT_STANDALONE
    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_standalone = args_instance.copy()
    standalone.allocate(args_standalone)
    instance_standalone = standalone.exists()
    if instance_standalone:
        standalone.delete()
    standalone.create()
    standalone.open()

    # Clear out the tmp dir
    standalone.clearTmpDir(__file__)

    def fin():
        standalone.delete()
        sbin_dir = standalone.get_sbin_dir()
        if not standalone.has_asan():
            valgrind_disable(sbin_dir)
    request.addfinalizer(fin)

    return TopologyStandalone(standalone)
Example #4
def topology(request):
    # Creating standalone instance ...
    standalone = DirSrv(verbose=False)
    args_instance[SER_HOST] = HOST_STANDALONE
    args_instance[SER_PORT] = PORT_STANDALONE
    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_standalone = args_instance.copy()
    standalone.allocate(args_standalone)
    instance_standalone = standalone.exists()
    if instance_standalone:
        standalone.delete()
    standalone.create()
    standalone.open()

    # Delete the instance at the end
    def fin():
        standalone.delete()

    request.addfinalizer(fin)

    # Clear out the tmp dir
    standalone.clearTmpDir(__file__)

    return TopologyStandalone(standalone)
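The fixtures above mutate a shared, module-level args_instance dict and then snapshot it with args_instance.copy() before allocate(). A sketch of the alternative used by the setUp-style examples further down, building a local dict per instance (the import locations are assumptions):

from lib389 import DirSrv
# Assumption: the SER_*/HOST_*/PORT_* names come from the same wildcard
# imports these test modules already rely on (lib389._constants and
# lib389.properties).

args_standalone = {
    SER_HOST: HOST_STANDALONE,
    SER_PORT: PORT_STANDALONE,
    SER_SERVERID_PROP: SERVERID_STANDALONE,
    SER_CREATION_SUFFIX: DEFAULT_SUFFIX,
}
standalone = DirSrv(verbose=False)
standalone.allocate(args_standalone)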
Example #5
 def setUp(self):
     # Create the master instance
     master = DirSrv(verbose=False)
     master.log.debug("Master allocated")
     args = {SER_HOST:          HOST_MASTER,
             SER_PORT:          PORT_MASTER,
             SER_DEPLOYED_DIR:  INSTANCE_PREFIX,
             SER_SERVERID_PROP: SERVERID_MASTER
             }
     master.allocate(args)
     if master.exists():
         master.delete()
     master.create()
     master.open()
     self.master = master
     
     # Create the consumer instance
     consumer = DirSrv(verbose=False)
     consumer.log.debug("Consumer allocated")
     args = {SER_HOST:          HOST_CONSUMER,
             SER_PORT:          PORT_CONSUMER,
             SER_DEPLOYED_DIR:  INSTANCE_PREFIX,
             SER_SERVERID_PROP: SERVERID_CONSUMER
             }
     consumer.allocate(args)
     if consumer.exists():
         consumer.delete()
     consumer.create()
     consumer.open()
     self.consumer = consumer
Example #6
def topology(request):
    """Create DS Deployment"""

    # Creating standalone instance ...
    standalone = DirSrv(verbose=DEBUGGING)
    args_instance[SER_HOST] = HOST_STANDALONE
    args_instance[SER_PORT] = PORT_STANDALONE
    args_instance[SER_SECURE_PORT] = SECUREPORT_STANDALONE
    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_standalone = args_instance.copy()
    standalone.allocate(args_standalone)
    instance_standalone = standalone.exists()
    if instance_standalone:
        standalone.delete()
    standalone.create()
    standalone.open()

    def fin():
        """If we are debugging just stop the instances, otherwise remove them
        """
        if DEBUGGING:
            standalone.stop()
        else:
            standalone.delete()

    request.addfinalizer(fin)

    return TopologyStandalone(standalone)
Example #7
def topology(request):
    # Creating standalone instance ...
    standalone = DirSrv(verbose=False)
    args_instance[SER_HOST] = HOST_STANDALONE
    args_instance[SER_PORT] = PORT_STANDALONE
    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_standalone = args_instance.copy()
    standalone.allocate(args_standalone)
    instance_standalone = standalone.exists()
    if instance_standalone:
        standalone.delete()
    standalone.create()
    standalone.open()

    # Delete the instance at the end
    def fin():
        # This is useful for analysing the test env.
        standalone.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
                           excludeSuffixes=[], encrypt=False, repl_data=True,
                           outputfile='%s/ldif/%s.ldif' % (standalone.dbdir, SERVERID_STANDALONE))
        standalone.clearBackupFS()
        standalone.backupFS()
        standalone.delete()
    request.addfinalizer(fin)

    # Clear out the tmp dir
    standalone.clearTmpDir(__file__)

    return TopologyStandalone(standalone)
Example #8
def topology(request):
    """
        This fixture is used to create a DirSrv instance for the 'module'.
    """
    schemainst = DirSrv(verbose=False)

    # Args for the instance
    args_instance[SER_HOST] = HOST_STANDALONE
    args_instance[SER_PORT] = PORT_STANDALONE
    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
    schemainst.allocate(args_instance)

    # Remove the instance if it already exists
    if schemainst.exists():
        schemainst.delete()

    # Create the instance
    schemainst.create()
    schemainst.open()

    def fin():
        schemainst.delete()

    request.addfinalizer(fin)

    return TopologyStandalone(schemainst)
Example #9
def topology(request):
    # Create the master instance
    master = DirSrv(verbose=False)
    master.log.debug("Master allocated")
    args = {SER_HOST: HOST_MASTER,
            SER_PORT: PORT_MASTER,
            SER_SERVERID_PROP: SERVERID_MASTER}
    master.allocate(args)
    if master.exists():
        master.delete()
    master.create()
    master.open()

    # Create the consumer instance
    consumer = DirSrv(verbose=False)
    consumer.log.debug("Consumer allocated")
    args = {SER_HOST: HOST_CONSUMER,
            SER_PORT: PORT_CONSUMER,
            SER_SERVERID_PROP: SERVERID_CONSUMER}
    consumer.allocate(args)
    if consumer.exists():
        consumer.delete()
    consumer.create()
    consumer.open()

    # Delete the instances at the end
    def fin():
        master.delete()
        consumer.delete()
    request.addfinalizer(fin)

    return TopologyReplication(master, consumer)
Example #10
def topology(request):
    '''
        This fixture is used to create a standalone topology for the 'module'.
        At the beginning, a standalone instance may already exist.
        A backup of the standalone instance may also exist.
    '''
    standalone = DirSrv(verbose=False)

    # Args for the standalone instance
    args_instance[SER_HOST] = HOST_STANDALONE
    args_instance[SER_PORT] = PORT_STANDALONE
    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
    args_standalone = args_instance.copy()
    standalone.allocate(args_standalone)

    # Check whether the instance already exists and remove it if it does
    instance_standalone = standalone.exists()
    if instance_standalone:
        standalone.delete()

    # Create the instance
    standalone.create()

    # Used to retrieve configuration information (dbdir, confdir...)
    standalone.open()

    def fin():
        standalone.delete()
    request.addfinalizer(fin)

    return TopologyStandalone(standalone)
Example #11
def topology(request):
    '''
        This fixture is used to create a standalone topology for the 'module'.
    '''
    standalone = DirSrv(verbose=True)

    # Args for the standalone instance
    args_instance[SER_HOST] = HOST_STANDALONE
    args_instance[SER_PORT] = PORT_STANDALONE
    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
    args_standalone = args_instance.copy()
    standalone.allocate(args_standalone)

    # Check whether the instance already exists
    instance_standalone = standalone.exists()

    # Remove the instance
    if instance_standalone:
        standalone.delete()

    # Create the instance
    standalone.create()

    # Used to retrieve configuration information (dbdir, confdir...)
    standalone.open()

    # clear the tmp directory
    standalone.clearTmpDir(__file__)

    # Here we have standalone instance up and running
    return TopologyStandalone(standalone)
Example #12
def topology(request):
    global installation1_prefix
    if installation1_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation1_prefix

    # Creating standalone instance ...
    standalone = DirSrv(verbose=False)
    args_instance[SER_HOST] = HOST_STANDALONE
    args_instance[SER_PORT] = PORT_STANDALONE
    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_standalone = args_instance.copy()
    standalone.allocate(args_standalone)
    instance_standalone = standalone.exists()
    if instance_standalone:
        standalone.delete()
    standalone.create()
    standalone.open()

    def fin():
        standalone.delete()

    request.addfinalizer(fin)

    return TopologyStandalone(standalone)
Example #13
def topology(request):
    # Create the realm first
    krb = MitKrb5(realm=REALM)
    if krb.check_realm():
        krb.destroy_realm()
    krb.create_realm()
    DEBUG = False

    # Creating master 1...
    master1 = DirSrv(verbose=DEBUG)
    args_instance[SER_HOST] = HOST_MASTER_1
    args_instance[SER_PORT] = PORT_MASTER_1
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_instance[SER_REALM] = REALM
    args_instance[SER_STRICT_HOSTNAME_CHECKING] = False
    args_master = args_instance.copy()
    master1.allocate(args_master)
    instance_master1 = master1.exists()
    if instance_master1:
        master1.delete()
    master1.create() # There is some magic in .create that finds the realm, and adds the keytab for us.
    master1.open()
    master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)

    # Creating master 2...
    master2 = DirSrv(verbose=DEBUG)
    args_instance[SER_HOST] = HOST_MASTER_2
    args_instance[SER_PORT] = PORT_MASTER_2
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_instance[SER_REALM] = REALM
    args_instance[SER_STRICT_HOSTNAME_CHECKING] = False
    args_master = args_instance.copy()
    master2.allocate(args_master)
    instance_master2 = master2.exists()
    if instance_master2:
        master2.delete()
    master2.create()
    master2.open()
    master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)

    # Delete the instances at the end
    def fin():
        master1.delete()
        master2.delete()
        if krb.check_realm():
            krb.destroy_realm()
    request.addfinalizer(fin)

    # Clear out the tmp dir
    master1.clearTmpDir(__file__)

    return TopologyReplication(master1, master2)
Example #14
def topology(request):
    # Master
    #
    # Create the master instance
    master = DirSrv(verbose=False)
    master.log.debug("Master allocated")
    args = {
        SER_HOST: HOST_MASTER,
        SER_PORT: PORT_MASTER,
        SER_SERVERID_PROP: SERVERID_MASTER
    }
    master.allocate(args)
    if master.exists():
        master.delete()
    master.create()
    master.open()

    # Enable replication
    master.replica.enableReplication(suffix=SUFFIX,
                                     role=ReplicaRole.MASTER,
                                     replicaId=REPLICAID_MASTER)

    # Consumer
    #
    # Create the consumer instance
    consumer = DirSrv(verbose=False)
    consumer.log.debug("Consumer allocated")
    args = {
        SER_HOST: HOST_CONSUMER,
        SER_PORT: PORT_CONSUMER,
        SER_SERVERID_PROP: SERVERID_CONSUMER
    }
    consumer.allocate(args)
    if consumer.exists():
        consumer.delete()
    consumer.create()
    consumer.open()

    # Enable replication
    consumer.replica.enableReplication(suffix=SUFFIX,
                                       role=ReplicaRole.CONSUMER)

    # Delete the instances at the end
    def fin():
        master.delete()
        consumer.delete()

    request.addfinalizer(fin)

    return TopologyReplication(master, consumer)
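Example #14 spells the roles with the ReplicaRole enum (ReplicaRole.MASTER, ReplicaRole.CONSUMER), while most other examples use the older REPLICAROLE_* constants. Assuming both come from lib389's constants module, the two import styles look like this:

# Assumption: both spellings are exported by lib389._constants; newer code
# prefers the enum, the older fixtures in this list use the bare constants.
from lib389._constants import ReplicaRole              # ReplicaRole.MASTER, ReplicaRole.CONSUMER
from lib389._constants import REPLICAROLE_MASTER, REPLICAROLE_CONSUMER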
Example #15
 def setUp(self):
     instance = DirSrv(verbose=False)
     instance.log.debug("Instance allocated")
     args = {SER_HOST:          LOCALHOST,
             SER_PORT:          INSTANCE_PORT,
             SER_DEPLOYED_DIR:  INSTANCE_PREFIX,
             SER_SERVERID_PROP: INSTANCE_SERVERID
             }
     instance.allocate(args)
     if instance.exists():
         instance.delete()
     instance.create()
     instance.open()
     self.instance = instance
Example #17
    def setUp(self):
        #
        # Master
        #
        # Create the master instance
        master = DirSrv(verbose=False)
        master.log.debug("Master allocated")
        args = {
            SER_HOST: HOST_MASTER,
            SER_PORT: PORT_MASTER,
            SER_DEPLOYED_DIR: INSTANCE_PREFIX,
            SER_SERVERID_PROP: SERVERID_MASTER
        }
        master.allocate(args)
        if master.exists():
            master.delete()
        master.create()
        master.open()

        # enable replication
        master.replica.enableReplication(suffix=SUFFIX,
                                         role=REPLICAROLE_MASTER,
                                         replicaId=REPLICAID_MASTER)
        self.master = master

        #
        # Consumer
        #
        # Create the consumer instance
        consumer = DirSrv(verbose=False)
        consumer.log.debug("Consumer allocated")
        args = {
            SER_HOST: HOST_CONSUMER,
            SER_PORT: PORT_CONSUMER,
            SER_DEPLOYED_DIR: INSTANCE_PREFIX,
            SER_SERVERID_PROP: SERVERID_CONSUMER
        }
        consumer.allocate(args)
        if consumer.exists():
            consumer.delete()
        consumer.create()
        consumer.open()

        # enable replication
        consumer.replica.enableReplication(suffix=SUFFIX,
                                           role=REPLICAROLE_CONSUMER)
        self.consumer = consumer
Example #18
def topology(request):
    instance = DirSrv(verbose=False)
    instance.log.debug("Instance allocated")
    args = {SER_HOST: LOCALHOST,
            SER_PORT: INSTANCE_PORT,
            SER_SERVERID_PROP: INSTANCE_SERVERID}
    instance.allocate(args)
    if instance.exists():
        instance.delete()
    instance.create()
    instance.open()

    def fin():
        instance.delete()
    request.addfinalizer(fin)

    return TopologyInstance(instance)
Example #19
def topology(request):
    standalone = DirSrv(verbose=False)
    standalone.log.debug('Instance allocated')
    args = {SER_HOST: LOCALHOST,
            SER_PORT: INSTANCE_PORT,
            SER_SERVERID_PROP: INSTANCE_SERVERID}
    standalone.allocate(args)
    if standalone.exists():
        standalone.delete()
    standalone.create()
    standalone.open()

    def fin():
        standalone.delete()
    request.addfinalizer(fin)

    return TopologyStandalone(standalone)
Example #20
def topology(request):
    # Master
    #
    # Create the master instance
    master = DirSrv(verbose=False)
    master.log.debug("Master allocated")
    args = {SER_HOST: HOST_MASTER,
            SER_PORT: PORT_MASTER,
            SER_SERVERID_PROP: SERVERID_MASTER}
    master.allocate(args)
    if master.exists():
        master.delete()
    master.create()
    master.open()

    # Enable replication
    master.replica.enableReplication(suffix=SUFFIX,
                                     role=REPLICAROLE_MASTER,
                                     replicaId=REPLICAID_MASTER)

    # Consumer
    #
    # Create the consumer instance
    consumer = DirSrv(verbose=False)
    consumer.log.debug("Consumer allocated")
    args = {SER_HOST: HOST_CONSUMER,
            SER_PORT: PORT_CONSUMER,
            SER_SERVERID_PROP: SERVERID_CONSUMER}
    consumer.allocate(args)
    if consumer.exists():
        consumer.delete()
    consumer.create()
    consumer.open()

    # Enable replication
    consumer.replica.enableReplication(suffix=SUFFIX,
                                       role=REPLICAROLE_CONSUMER)

    # Delete the instances at the end
    def fin():
        master.delete()
        consumer.delete()
    request.addfinalizer(fin)

    return TopologyReplication(master, consumer)
Example #21
def topology(request):
    standalone = DirSrv(verbose=False)
    standalone.log.debug("Instance allocated")
    args = {SER_HOST: LOCALHOST,
            SER_PORT: INSTANCE_PORT,
            # SER_DEPLOYED_DIR:  INSTANCE_PREFIX,
            SER_SERVERID_PROP: INSTANCE_SERVERID}
    standalone.allocate(args)
    if standalone.exists():
        standalone.delete()
    standalone.create()
    standalone.open()

    def fin():
        standalone.delete()
    request.addfinalizer(fin)

    return TopologyStandalone(standalone)
Example #22
def topology(request):
    # Create the realm
    instance = DirSrv(verbose=False)
    instance.log.debug("Instance allocated")
    args = {SER_PORT: INSTANCE_PORT,
            SER_SERVERID_PROP: INSTANCE_SERVERID}
    instance.allocate(args)
    if instance.exists():
        instance.delete()
    instance.create()
    instance.open()

    def fin():
        if instance.exists():
            instance.delete()
    request.addfinalizer(fin)

    return TopologyInstance(instance)
Example #23
 def setUp(self):
     #
     # Master
     #
     # Create the master instance
     master = DirSrv(verbose=False)
     master.log.debug("Master allocated")
     args = {SER_HOST:          HOST_MASTER,
             SER_PORT:          PORT_MASTER,
             SER_DEPLOYED_DIR:  INSTANCE_PREFIX,
             SER_SERVERID_PROP: SERVERID_MASTER
             }
     master.allocate(args)
     if master.exists():
         master.delete()
     master.create()
     master.open()
     
     # enable replication
     master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER)
     self.master = master
     
     
     #
     # Consumer
     #
     # Create the consumer instance
     consumer = DirSrv(verbose=False)
     consumer.log.debug("Consumer allocated")
     args = {SER_HOST:          HOST_CONSUMER,
             SER_PORT:          PORT_CONSUMER,
             SER_DEPLOYED_DIR:  INSTANCE_PREFIX,
             SER_SERVERID_PROP: SERVERID_CONSUMER
             }
     consumer.allocate(args)
     if consumer.exists():
         consumer.delete()
     consumer.create()
     consumer.open()
     
     # enable replication
     consumer.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER)
     self.consumer = consumer
Example #24
def create_instance(config_attr):
    log.info('create_instance - Installs the instance and sets the value of InstScriptsEnabled to true or false.')

    log.info("Set up the instance and set the config_attr")
    instance_data = generate_ds_params(1, ReplicaRole.STANDALONE)
    # Create instance
    standalone = DirSrv(verbose=False)

    # Args for the instance
    args_instance[SER_HOST] = instance_data[SER_HOST]
    args_instance[SER_PORT] = instance_data[SER_PORT]
    args_instance[SER_SERVERID_PROP] = instance_data[SER_SERVERID_PROP]
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_instance[SER_INST_SCRIPTS_ENABLED] = config_attr
    args_standalone = args_instance.copy()
    standalone.allocate(args_standalone)
    if standalone.exists():
        standalone.delete()
    standalone.create()
    standalone.open()
    return standalone
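create_instance() is a plain helper rather than a fixture, so the caller owns cleanup. A hypothetical way to drive it from pytest (the fixture name and parametrization below are illustrative, not from the example) could be:

import pytest


# Hypothetical wrapper: run the same tests once with InstScriptsEnabled set
# to 'true' and once with 'false', removing the instance after each run.
@pytest.fixture(params=['true', 'false'])
def instance_with_scripts(request):
    standalone = create_instance(request.param)

    def fin():
        standalone.delete()
    request.addfinalizer(fin)

    return standalone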
Example #25
def topology(request):
    # Creating standalone instance ...
    standalone = DirSrv(verbose=False)
    args_instance[SER_HOST] = HOST_STANDALONE
    args_instance[SER_PORT] = PORT_STANDALONE
    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_standalone = args_instance.copy()
    standalone.allocate(args_standalone)
    instance_standalone = standalone.exists()
    if instance_standalone:
        standalone.delete()
    standalone.create()
    standalone.open()

    # Delete the instance at the end
    def fin():
        #standalone.delete()
        pass
    request.addfinalizer(fin)

    return TopologyStandalone(standalone)
Example #26
def topology(request):
    standalone = DirSrv(verbose=False)
    standalone.log.debug("Instance allocated")
    args = {
        SER_PORT: INSTANCE_PORT,
        # SER_DEPLOYED_DIR:  INSTANCE_PREFIX,
        SER_SERVERID_PROP: INSTANCE_SERVERID
    }
    standalone.allocate(args)
    if standalone.exists():
        standalone.delete()
    standalone.create()
    standalone.open()
    standalone.config.set('nsslapd-accesslog-logbuffering', 'off')

    def fin():
        standalone.delete()

    request.addfinalizer(fin)
    # We have to wait for time to elapse for the access log to be flushed.

    return TopologyStandalone(standalone)
Example #27
def topology(request):
    # Create the master instance
    master = DirSrv(verbose=False)
    master.log.debug("Master allocated")
    args = {
        SER_HOST: HOST_MASTER,
        SER_PORT: PORT_MASTER,
        SER_SERVERID_PROP: SERVERID_MASTER
    }
    master.allocate(args)
    if master.exists():
        master.delete()
    master.create()
    master.open()

    # Create the consumer instance
    consumer = DirSrv(verbose=False)
    consumer.log.debug("Consumer allocated")
    args = {
        SER_HOST: HOST_CONSUMER,
        SER_PORT: PORT_CONSUMER,
        SER_SERVERID_PROP: SERVERID_CONSUMER
    }
    consumer.allocate(args)
    if consumer.exists():
        consumer.delete()
    consumer.create()
    consumer.open()

    # Delete the instances at the end
    def fin():
        master.delete()
        consumer.delete()

    request.addfinalizer(fin)

    return TopologyReplication(master, consumer)
Example #28
def topology(request):
    # Create the realm
    krb = MitKrb5(realm=REALM)
    instance = DirSrv(verbose=False)
    instance.log.debug("Instance allocated")
    # WARNING: If this test fails, it's likely a hostname issue!
    args = {SER_HOST: socket.gethostname(),
            SER_PORT: INSTANCE_PORT,
            SER_REALM: REALM,
            SER_SERVERID_PROP: INSTANCE_SERVERID}
    instance.allocate(args)
    if instance.exists():
        instance.delete()
    # It's likely our realm exists too
    # Remove the old keytab
    if os.path.exists(KEYTAB):
        os.remove(KEYTAB)
    if krb.check_realm():
        krb.destroy_realm()
    # This will automatically create the krb entries
    krb.create_realm()
    instance.create()
    instance.open()

    def fin():
        if instance.exists():
            instance.delete()
        if krb.check_realm():
            krb.destroy_realm()
        if os.path.exists(KEYTAB):
            os.remove(KEYTAB)
        if os.path.exists(CCACHE):
            os.remove(CCACHE)
    request.addfinalizer(fin)

    return TopologyInstance(instance)
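Example #28 prepares a Kerberos realm plus a keytab (KEYTAB) and credential cache (CCACHE) for the instance. A hypothetical follow-up, assuming a ticket has already been obtained into CCACHE (e.g. with kinit -kt against the keytab) and KRB5CCNAME points at it, would be a GSSAPI bind from a test:

import socket

import ldap
import ldap.sasl

# Hypothetical GSSAPI bind sketch against the instance created above.
conn = ldap.initialize("ldap://%s:%s" % (socket.gethostname(), INSTANCE_PORT))
conn.sasl_interactive_bind_s("", ldap.sasl.gssapi())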
Example #29
def topology(request):
    global installation1_prefix
    if installation1_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation1_prefix

    # Creating master 1...
    master1 = DirSrv(verbose=False)
    args_instance[SER_HOST] = HOST_MASTER_1
    args_instance[SER_PORT] = PORT_MASTER_1
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_master = args_instance.copy()
    master1.allocate(args_master)
    instance_master1 = master1.exists()
    if instance_master1:
        master1.delete()
    master1.create()
    master1.open()
    master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)

    # Creating master 2...
    master2 = DirSrv(verbose=False)
    args_instance[SER_HOST] = HOST_MASTER_2
    args_instance[SER_PORT] = PORT_MASTER_2
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_master = args_instance.copy()
    master2.allocate(args_master)
    instance_master2 = master2.exists()
    if instance_master2:
        master2.delete()
    master2.create()
    master2.open()
    master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)

    #
    # Create all the agreements
    #
    # Creating agreement from master 1 to master 2
    properties = {RA_NAME:      'meTo_%s:%s' %(master2.host, master2.port),
                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
    if not m1_m2_agmt:
        log.fatal("Fail to create a master -> master replica agreement")
        sys.exit(1)
    log.debug("%s created" % m1_m2_agmt)

    # Creating agreement from master 2 to master 1
    properties = {RA_NAME:      'meTo_%s:%s' %(master1.host, master1.port),
                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
    if not m2_m1_agmt:
        log.fatal("Fail to create a master -> master replica agreement")
        sys.exit(1)
    log.debug("%s created" % m2_m1_agmt)

    # Allow the replicas to get situated with the new agreements...
    time.sleep(5)

    #
    # Initialize all the agreements
    #
    master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
    master1.waitForReplInit(m1_m2_agmt)
    master2.agreement.init(SUFFIX, HOST_MASTER_1, PORT_MASTER_1)
    master2.waitForReplInit(m2_m1_agmt)

    # Check replication is working...
    if master1.testReplication(DEFAULT_SUFFIX, master2):
        log.info('Replication is working.')
    else:
        log.fatal('Replication is not working.')
        assert False

    log.info("Set Replication Debugging loglevel for the errorlog")
    master1.setLogLevel(LOG_REPLICA)
    master2.setLogLevel(LOG_REPLICA)

    logging_attr = 'nsslapd-logging-hr-timestamps-enabled'
    master1.modify_s("cn=config", [(ldap.MOD_REPLACE, logging_attr, "off")])
    master2.modify_s("cn=config", [(ldap.MOD_REPLACE, logging_attr, "off")])

    # Delete the instances at the end
    def fin():
        master1.delete()
        master2.delete()
    request.addfinalizer(fin)

    # Clear out the tmp dir
    master1.clearTmpDir(__file__)

    return TopologyReplication(master1, master2, m1_m2_agmt, m2_m1_agmt)
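Unlike the two-argument TopologyReplication used by the other replication fixtures, this one also returns both agreement DNs. A minimal holder that would accept that call (an assumed sketch, not the project's actual class) might be:

class TopologyReplication(object):
    """Assumed holder matching the four-argument return above."""
    def __init__(self, master1, master2, m1_m2_agmt=None, m2_m1_agmt=None):
        master1.open()
        master2.open()
        self.master1 = master1
        self.master2 = master2
        self.m1_m2_agmt = m1_m2_agmt
        self.m2_m1_agmt = m2_m1_agmt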
Example #30
class Test_dirsrv():
    def _add_user(self, success=True):
        try:
            self.instance.add_s(Entry((TEST_REPL_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
                                                      'uid': 'test',
                                                      'sn': 'test',
                                                      'cn': 'test'})))
        except Exception as e:
            if success:
                raise
            else:
                self.instance.log.info('Fail to add (expected): %s' % e.args)
                return
            
        self.instance.log.info('%s added' % TEST_REPL_DN)

    def _mod_user(self, success=True):
        try:
            replace = [(ldap.MOD_REPLACE, 'description', str(randint(1, 100)))]
            self.instance.modify_s(TEST_REPL_DN, replace)
        except Exception as e:
            if success:
                raise
            else:
                self.instance.log.info('Fail to modify (expected): %s' % e.args)
                return
            
        self.instance.log.info('%s modified' % TEST_REPL_DN)

    def setUp(self):
        pass


    def tearDown(self):
        pass


    def test_allocate(self, verbose=False):
        instance = DirSrv(verbose=verbose)
        instance.log.debug("Instance allocated")
        assert instance.state == DIRSRV_STATE_INIT

        # Allocate the instance
        args = {SER_HOST:         LOCALHOST,
                SER_PORT:         INSTANCE_PORT,
                SER_DEPLOYED_DIR: INSTANCE_PREFIX,
                SER_SERVERID_PROP: INSTANCE_SERVERID
                }
        instance.allocate(args)
        
        userid = pwd.getpwuid(os.getuid())[0]

        
        # Now verify the settings 
        assert instance.state     == DIRSRV_STATE_ALLOCATED
        assert instance.host      == LOCALHOST
        assert instance.port      == INSTANCE_PORT
        assert instance.sslport   == None
        assert instance.binddn    == DN_DM
        assert instance.bindpw    == PW_DM
        assert instance.creation_suffix == DEFAULT_SUFFIX
        assert instance.userid    == userid
        assert instance.serverid  == INSTANCE_SERVERID
        assert instance.groupid   == instance.userid
        assert instance.prefix    == INSTANCE_PREFIX
        assert instance.backupdir == INSTANCE_BACKUP
        
        # Now check we can change the settings of an allocated DirSrv
        args = {SER_SERVERID_PROP:INSTANCE_SERVERID,
                SER_HOST:         LOCALHOST,
                SER_PORT:         INSTANCE_PORT,
                SER_DEPLOYED_DIR: INSTANCE_PREFIX,
                SER_ROOT_DN: "uid=foo"}
        instance.allocate(args)
        assert instance.state     == DIRSRV_STATE_ALLOCATED
        assert instance.host      == LOCALHOST
        assert instance.port      == INSTANCE_PORT
        assert instance.sslport   == None
        assert instance.binddn    == "uid=foo"
        assert instance.bindpw    == PW_DM
        assert instance.creation_suffix == DEFAULT_SUFFIX
        assert instance.userid    == userid
        assert instance.serverid  == INSTANCE_SERVERID
        assert instance.groupid   == instance.userid
        assert instance.prefix    == INSTANCE_PREFIX
        assert instance.backupdir == INSTANCE_BACKUP
        
        # OK restore back the valid parameters
        args = {SER_SERVERID_PROP:INSTANCE_SERVERID,
                SER_HOST:         LOCALHOST,
                SER_PORT:         INSTANCE_PORT,
                SER_DEPLOYED_DIR: INSTANCE_PREFIX}
        instance.allocate(args)
        assert instance.state     == DIRSRV_STATE_ALLOCATED
        assert instance.host      == LOCALHOST
        assert instance.port      == INSTANCE_PORT
        assert instance.sslport   == None
        assert instance.binddn    == DN_DM
        assert instance.bindpw    == PW_DM
        assert instance.creation_suffix == DEFAULT_SUFFIX
        assert instance.userid    == userid
        assert instance.serverid  == INSTANCE_SERVERID
        assert instance.groupid   == instance.userid
        assert instance.prefix    == INSTANCE_PREFIX
        assert instance.backupdir == INSTANCE_BACKUP
        
        self.instance = instance
        
    def test_list_init(self):
        '''
            Lists the instances on the file system
        '''
        for properties in self.instance.list():
            self.instance.log.info("properties: %r" % properties)
            
        for properties in self.instance.list(all=True):
            self.instance.log.info("properties (all): %r" % properties)
        
    def test_allocated_to_offline(self):
        self.instance.create()

        
    def test_offline_to_online(self):
        self.instance.open()
        
    def test_online_to_offline(self):
        self.instance.close()
        
    def test_offline_to_allocated(self):
        self.instance.delete()
    
    def test_allocated_to_online(self, verbose):
        # Here the instance was already created, check we can connect to it
        # without creating it (especially without a serverid value)
        # Allocate the instance
        args = {SER_HOST:         LOCALHOST,
                SER_PORT:         INSTANCE_PORT,
                SER_DEPLOYED_DIR: INSTANCE_PREFIX,
                SER_SERVERID_PROP: INSTANCE_SERVERID
                }
        self.instance.log.info("test_allocated_to_online: Create an instance")
        self.instance = DirSrv(verbose=verbose)
        assert not hasattr(self, 'serverid')
        self.instance.allocate(args)
        self.instance.create()
        self.instance.open()
        assert self.instance.serverid != None
        
        # The instance is created, allocate a new DirSrv
        self.instance.log.info("test_allocated_to_online: instance New")
        self.instance = DirSrv(verbose=verbose)
        assert not hasattr(self, 'serverid')
        assert self.instance.state == DIRSRV_STATE_INIT
        
        args = {SER_HOST:         LOCALHOST,
                SER_PORT:         INSTANCE_PORT,
                SER_DEPLOYED_DIR: INSTANCE_PREFIX,
                }
        self.instance.allocate(args)
        self.instance.log.info("test_allocated_to_online: instance Allocated")
        assert self.instance.serverid == None
        assert self.instance.state == DIRSRV_STATE_ALLOCATED
        
        self.instance.open()
        self.instance.log.info("test_allocated_to_online: instance online")
        assert self.instance.serverid != None
        assert self.instance.serverid == self.instance.inst
        assert self.instance.state == DIRSRV_STATE_ONLINE
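The test methods in Example #30 walk a DirSrv object through its lifecycle. The state names below match the constants asserted in the tests; the OFFLINE steps are inferred from the method names, so treat the diagram as an assumed summary:

# Assumed DirSrv state machine exercised above:
#
#   DIRSRV_STATE_INIT       --allocate()-->  DIRSRV_STATE_ALLOCATED
#   DIRSRV_STATE_ALLOCATED  --create()---->  DIRSRV_STATE_OFFLINE
#   DIRSRV_STATE_OFFLINE    --open()------>  DIRSRV_STATE_ONLINE
#   DIRSRV_STATE_ONLINE     --close()----->  DIRSRV_STATE_OFFLINE
#   DIRSRV_STATE_OFFLINE    --delete()---->  DIRSRV_STATE_ALLOCATED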
Example #31
def test_dynamic_plugins(topology):
    """
        Test Dynamic Plugins - exercise each plugin and its main features, while
        changing the configuration without restarting the server.

        Need to test: functionality, stability, and stress.  These tests need to run
                      with replication disabled, and with replication set up with a
                      second instance.  Then test that replication is working, and that
                      we have the same entries on each side.

        Functionality - Make sure that as configuration changes are made they take
                        effect immediately.  Cross-plugin interaction (e.g. automember/memberOf)
                        needs to be tested, as well as plugin tasks.  Need to test plugin
                        config validation (dependencies, etc.).

        Memory Corruption - Restart the plugins many times, and in different orders, and test
                            functionality and stability.  This will exercise the internal
                            plugin linked lists, dse callbacks, and task handlers.

        Stress - Put the server under load that will trigger multiple plugins (MO, RI, DNA, etc.).
                 Restart various plugins while these operations are going on.  Perform this test
                 5 times (stress_max_runs).

    """

    REPLICA_PORT = 33334
    RUV_FILTER = '(&(nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff)(objectclass=nstombstone))'
    master_maxcsn = 0
    replica_maxcsn = 0
    msg = ' (no replication)'
    replication_run = False
    stress_max_runs = 5

    # First enable dynamic plugins
    try:
        topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
    except ldap.LDAPError as e:
        log.fatal('Failed to enable dynamic plugins: ' + e.message['desc'])
        assert False

    # Test that critical plugins can be updated even though the change might not be applied
    try:
        topology.standalone.modify_s(DN_LDBM, [(ldap.MOD_REPLACE, 'description', 'test')])
    except ldap.LDAPError as e:
        log.fatal('Failed to apply change to critical plugin: ' + e.message['desc'])
        assert False

    while 1:
        #
        # First run the tests with replication disabled, then rerun them with replication set up
        #

        ############################################################################
        #  Test plugin functionality
        ############################################################################

        log.info('####################################################################')
        log.info('Testing Dynamic Plugins Functionality' + msg + '...')
        log.info('####################################################################\n')

        plugin_tests.test_all_plugins(topology.standalone)

        log.info('####################################################################')
        log.info('Successfully Tested Dynamic Plugins Functionality' + msg + '.')
        log.info('####################################################################\n')

        ############################################################################
        # Test the stability by exercising the internal lists, callbacks, and task handlers
        ############################################################################

        log.info('####################################################################')
        log.info('Testing Dynamic Plugins for Memory Corruption' + msg + '...')
        log.info('####################################################################\n')
        prev_plugin_test = None
        prev_prev_plugin_test = None

        for plugin_test in plugin_tests.func_tests:
            #
            # Restart the plugin several times (and prev plugins) - work that linked list
            #
            plugin_test(topology.standalone, "restart")

            if prev_prev_plugin_test:
                prev_prev_plugin_test(topology.standalone, "restart")

            plugin_test(topology.standalone, "restart")

            if prev_plugin_test:
                prev_plugin_test(topology.standalone, "restart")

            plugin_test(topology.standalone, "restart")

            # Now run the functional test
            plugin_test(topology.standalone)

            # Set the previous tests
            if prev_plugin_test:
                prev_prev_plugin_test = prev_plugin_test
            prev_plugin_test = plugin_test

        log.info('####################################################################')
        log.info('Successfully Tested Dynamic Plugins for Memory Corruption' + msg + '.')
        log.info('####################################################################\n')

        ############################################################################
        # Stress two plugins while restarting them, and while restarting other plugins.
        # The goal is to not crash, and have the plugins work after stressing them.
        ############################################################################

        log.info('####################################################################')
        log.info('Stressing Dynamic Plugins' + msg + '...')
        log.info('####################################################################\n')

        stress_tests.configureMO(topology.standalone)
        stress_tests.configureRI(topology.standalone)

        stress_count = 0
        while stress_count < stress_max_runs:
            log.info('####################################################################')
            log.info('Running stress test' + msg + '.  Run (%d/%d)...' % (stress_count + 1, stress_max_runs))
            log.info('####################################################################\n')

            try:
                # Launch three new threads to add a bunch of users
                add_users = stress_tests.AddUsers(topology.standalone, 'employee', True)
                add_users.start()
                add_users2 = stress_tests.AddUsers(topology.standalone, 'entry', True)
                add_users2.start()
                add_users3 = stress_tests.AddUsers(topology.standalone, 'person', True)
                add_users3.start()
                time.sleep(1)

                # While we are adding users restart the MO plugin and an idle plugin
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                time.sleep(1)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                time.sleep(1)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
                topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
                time.sleep(1)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                time.sleep(2)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                time.sleep(1)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
                topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                time.sleep(1)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)

                # Wait for the 'adding' threads to complete
                add_users.join()
                add_users2.join()
                add_users3.join()

                # Now launch three threads to delete the users
                del_users = stress_tests.DelUsers(topology.standalone, 'employee')
                del_users.start()
                del_users2 = stress_tests.DelUsers(topology.standalone, 'entry')
                del_users2.start()
                del_users3 = stress_tests.DelUsers(topology.standalone, 'person')
                del_users3.start()
                time.sleep(1)

                # Restart both the MO, RI plugins during these deletes, and an idle plugin
                topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
                time.sleep(1)
                topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
                time.sleep(1)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                time.sleep(1)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                time.sleep(1)
                topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
                topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
                topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
                topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
                time.sleep(2)
                topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
                time.sleep(1)
                topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
                time.sleep(1)
                topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
                time.sleep(1)
                topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
                topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)
                topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)

                # Wait for the 'deleting' threads to complete
                del_users.join()
                del_users2.join()
                del_users3.join()

                # Now make sure both the MO and RI plugins still work correctly
                plugin_tests.func_tests[8](topology.standalone)  # RI plugin
                plugin_tests.func_tests[5](topology.standalone)  # MO plugin

                # Cleanup the stress tests
                stress_tests.cleanup(topology.standalone)

            except:
                log.info('Stress test failed!')
                repl_fail(replica_inst)

            stress_count += 1
            log.info('####################################################################')
            log.info('Successfully Stressed Dynamic Plugins' + msg +
                     '.  Completed (%d/%d)' % (stress_count, stress_max_runs))
            log.info('####################################################################\n')

        if replication_run:
            # We're done.
            break
        else:
            #
            # Enable replication and run everything one more time
            #
            log.info('Setting up replication, and rerunning the tests...\n')

            # Create replica instance
            replica_inst = DirSrv(verbose=False)
            args_instance[SER_HOST] = LOCALHOST
            args_instance[SER_PORT] = REPLICA_PORT
            args_instance[SER_SERVERID_PROP] = 'replica'
            args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX

            args_replica_inst = args_instance.copy()
            replica_inst.allocate(args_replica_inst)
            replica_inst.create()
            replica_inst.open()

            try:
                topology.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX,
                                                              role=REPLICAROLE_MASTER,
                                                              replicaId=1)
                replica_inst.replica.enableReplication(suffix=DEFAULT_SUFFIX,
                                                              role=REPLICAROLE_CONSUMER,
                                                              replicaId=65535)
                properties = {RA_NAME: r'to_replica',
                              RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                              RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                              RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                              RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}

                repl_agreement = topology.standalone.agreement.create(suffix=DEFAULT_SUFFIX,
                                                                      host=LOCALHOST,
                                                                      port=REPLICA_PORT,
                                                                      properties=properties)

                if not repl_agreement:
                    log.fatal("Fail to create a replica agreement")
                    repl_fail(replica_inst)

                topology.standalone.agreement.init(DEFAULT_SUFFIX, LOCALHOST, REPLICA_PORT)
                topology.standalone.waitForReplInit(repl_agreement)
            except:
                log.info('Failed to setup replication!')
                repl_fail(replica_inst)

            replication_run = True
            msg = ' (replication enabled)'
            time.sleep(1)

    ############################################################################
    # Check replication, and data are in sync, and remove the instance
    ############################################################################

    log.info('Checking if replication is in sync...')

    try:
        # Grab master's max CSN
        entry = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, RUV_FILTER)
        if not entry:
            log.error('Failed to find db tombstone entry from master')
            repl_fail(replica_inst)
        elements = entry[0].getValues('nsds50ruv')
        for ruv in elements:
            if 'replica 1' in ruv:
                parts = ruv.split()
                if len(parts) == 5:
                    master_maxcsn = parts[4]
                    break
                else:
                    log.error('RUV is incomplete')
                    repl_fail(replica_inst)
        if master_maxcsn == 0:
            log.error('Failed to find maxcsn on master')
            repl_fail(replica_inst)

    except ldap.LDAPError as e:
        log.fatal('Unable to search master for db tombstone: ' + e.message['desc'])
        repl_fail(replica_inst)

    # Loop on the consumer - waiting for it to catch up
    count = 0
    insync = False
    while count < 60:
        try:
            # Grab the consumer's max CSN
            entry = replica_inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, RUV_FILTER)
            if not entry:
                log.error('Failed to find db tombstone entry on consumer')
                repl_fail(replica_inst)
            elements = entry[0].getValues('nsds50ruv')
            for ruv in elements:
                if 'replica 1' in ruv:
                    parts = ruv.split()
                    if len(parts) == 5:
                        replica_maxcsn = parts[4]
                        break
            if replica_maxcsn == 0:
                log.error('Failed to find maxcsn on consumer')
                repl_fail(replica_inst)
        except ldap.LDAPError as e:
            log.fatal('Unable to search for db tombstone on consumer: ' + e.message['desc'])
            repl_fail(replica_inst)

        if master_maxcsn == replica_maxcsn:
            insync = True
            log.info('Replication is in sync.\n')
            break
        count += 1
        time.sleep(1)

    # Report on replication status
    if not insync:
        log.error('Consumer not in sync with master!')
        repl_fail(replica_inst)

    #
    # Verify the databases are identical. There should not be any "user, entry, employee" entries
    #
    log.info('Checking if the data is the same between the replicas...')

    # Check the master
    try:
        entries = topology.standalone.search_s(DEFAULT_SUFFIX,
                                        ldap.SCOPE_SUBTREE,
                                        "(|(uid=person*)(uid=entry*)(uid=employee*))")
        if len(entries) > 0:
            log.error('Master database has incorrect data set!\n')
            repl_fail(replica_inst)
    except ldap.LDAPError as e:
        log.fatal('Unable to search db on master: ' + e.message['desc'])
        repl_fail(replica_inst)

    # Check the consumer
    try:
        entries = replica_inst.search_s(DEFAULT_SUFFIX,
                                        ldap.SCOPE_SUBTREE,
                                        "(|(uid=person*)(uid=entry*)(uid=employee*))")
        if len(entries) > 0:
            log.error('Consumer database is not consistent with master database')
            repl_fail(replica_inst)
    except ldap.LDAPError as e:
        log.fatal('Unable to search db on consumer: ' + e.message['desc'])
        repl_fail(replica_inst)

    log.info('Data is consistent across the replicas.\n')

    log.info('####################################################################')
    log.info('Replication consistency test passed')
    log.info('####################################################################\n')

    # Remove the replica instance
    replica_inst.delete()

    ############################################################################
    # We made it to the end!
    ############################################################################

    log.info('#####################################################')
    log.info('#####################################################')
    log.info("Dynamic Plugins Testsuite: Completed Successfully!")
    log.info('#####################################################')
    log.info('#####################################################\n')
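The test above relies on a repl_fail() helper that is not shown. A hypothetical minimal version, assuming its job is to tear down the temporary replica instance and abort the run, could be:

# Hypothetical repl_fail() helper assumed by Example #31: remove the
# temporary replica instance (if it was created) and fail the test.
def repl_fail(replica_inst):
    if replica_inst is not None and replica_inst.exists():
        replica_inst.delete()
    assert False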
Example #32
def topology(request):
    """Create Replication Deployment"""

    # Creating master 1...
    if DEBUGGING:
        master1 = DirSrv(verbose=True)
    else:
        master1 = DirSrv(verbose=False)
    args_instance[SER_HOST] = HOST_MASTER_1
    args_instance[SER_PORT] = PORT_MASTER_1
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_master = args_instance.copy()
    master1.allocate(args_master)
    instance_master1 = master1.exists()
    if instance_master1:
        master1.delete()
    master1.create()
    master1.open()
    master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)

    # Creating master 2...
    if DEBUGGING:
        master2 = DirSrv(verbose=True)
    else:
        master2 = DirSrv(verbose=False)
    args_instance[SER_HOST] = HOST_MASTER_2
    args_instance[SER_PORT] = PORT_MASTER_2
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_master = args_instance.copy()
    master2.allocate(args_master)
    instance_master2 = master2.exists()
    if instance_master2:
        master2.delete()
    master2.create()
    master2.open()
    master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)

    #
    # Create all the agreements
    #
    # Creating agreement from master 1 to master 2
    properties = {RA_NAME: 'meTo_' + master2.host + ':' + str(master2.port),
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
    if not m1_m2_agmt:
        log.fatal("Fail to create a master -> master replica agreement")
        sys.exit(1)
    log.debug("%s created" % m1_m2_agmt)

    # Creating agreement from master 2 to master 1
    properties = {RA_NAME: 'meTo_' + master1.host + ':' + str(master1.port),
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
    if not m2_m1_agmt:
        log.fatal("Fail to create a master -> master replica agreement")
        sys.exit(1)
    log.debug("%s created" % m2_m1_agmt)

    # Allow the replicas to get situated with the new agreements...
    time.sleep(5)

    #
    # Initialize all the agreements
    #
    master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
    master1.waitForReplInit(m1_m2_agmt)

    # Check replication is working...
    if master1.testReplication(DEFAULT_SUFFIX, master2):
        log.info('Replication is working.')
    else:
        log.fatal('Replication is not working.')
        assert False

    def fin():
        """If we are debugging just stop the instances, otherwise remove
        them
        """
        if DEBUGGING:
            master1.stop()
            master2.stop()
        else:
            master1.delete()
            master2.delete()

    request.addfinalizer(fin)

    # Clear out the tmp dir
    master1.clearTmpDir(__file__)

    return TopologyReplication(master1, master2)
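A test module would consume the fixture above by name. The following is only an illustrative sketch: it assumes the TopologyReplication helper exposes the instances as .master1 and .master2 (as these suites commonly do) and that Entry, ldap, time and DEFAULT_SUFFIX are already imported at module level.

def test_basic_replication(topology):
    """Hypothetical test: add an entry on master1 and read it back from master2."""
    test_dn = 'uid=repl_check,%s' % DEFAULT_SUFFIX
    topology.master1.add_s(Entry((test_dn, {
        'objectclass': 'top person organizationalPerson inetOrgPerson'.split(),
        'uid': 'repl_check',
        'cn': 'repl check',
        'sn': 'check'})))
    time.sleep(5)  # crude wait; real tests poll or use replication helpers
    entries = topology.master2.search_s(test_dn, ldap.SCOPE_BASE, '(objectclass=*)')
    assert len(entries) == 1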
Example #33
def _create_instances(topo_dict, suffix):
    """Create requested instances without replication or any other modifications

    :param topo_dict: a dictionary {ReplicaRole.STANDALONE: num, ReplicaRole.MASTER: num,
                                    ReplicaRole.HUB: num, ReplicaRole.CONSUMER: num}
    :type topo_dict: dict
    :param suffix: a suffix
    :type suffix: str

    :return: TopologyMain object
    """

    instances = {}
    ms = {}
    cs = {}
    hs = {}
    ins = {}

    # Create instances
    for role in topo_dict.keys():
        for inst_num in range(1, topo_dict[role]+1):
            instance_data = generate_ds_params(inst_num, role)
            if DEBUGGING:
                instance = DirSrv(verbose=True)
            else:
                instance = DirSrv(verbose=False)
            # TODO: Move 'args_instance' into generate_ds_params.
            # Keep in mind that the function also returns SER_SECURE_PORT
            # and REPLICA_ID, which are not used during the instance
            # creation here.
            # args_instance[SER_HOST] = instance_data[SER_HOST]
            args_instance = {}
            args_instance[SER_PORT] = instance_data[SER_PORT]
            args_instance[SER_SECURE_PORT] = instance_data[SER_SECURE_PORT]
            args_instance[SER_SERVERID_PROP] = instance_data[SER_SERVERID_PROP]
            # A suffix-less install is required for some CLI tests, but it
            # is invalid to request a replicated topology without a suffix.
            if suffix is not None:
                args_instance[SER_CREATION_SUFFIX] = suffix
            elif role != ReplicaRole.STANDALONE:
                raise AssertionError("Invalid request to make suffix-less replicated environment")

            instance.allocate(args_instance)

            instance_exists = instance.exists()

            if instance_exists:
                instance.delete(pyinstall=PYINSTALL)

            instance.create(pyinstall=PYINSTALL)
            # We set a URL here to force ldap:// only. Once we turn on TLS
            # we'll flick this to ldaps.
            instance.use_ldap_uri()
            instance.open()
            if role == ReplicaRole.STANDALONE:
                ins[instance.serverid] = instance
                instances.update(ins)
            if role == ReplicaRole.MASTER:
                ms[instance.serverid] = instance
                instances.update(ms)
            if role == ReplicaRole.CONSUMER:
                cs[instance.serverid] = instance
                instances.update(cs)
            if role == ReplicaRole.HUB:
                hs[instance.serverid] = instance
                instances.update(hs)
            if DEBUGGING:
                instance.config.set('nsslapd-accesslog-logbuffering','off')
                instance.config.set('nsslapd-errorlog-level','8192')
                instance.config.set('nsslapd-auditlog-logging-enabled','on')
            log.info("Instance with parameters {} was created.".format(args_instance))

    if "standalone1" in instances and len(instances) == 1:
        return TopologyMain(standalones=instances["standalone1"])
    else:
        return TopologyMain(standalones=ins, masters=ms, consumers=cs, hubs=hs)
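This helper can also be called directly to stand up a small topology with no replication wired up yet. A sketch only, assuming ReplicaRole and DEFAULT_SUFFIX come from lib389._constants and that TopologyMain exposes the created masters as a .ms dict keyed by server id:

# Hypothetical usage: two masters and one consumer, instances only.
topo = _create_instances({ReplicaRole.MASTER: 2, ReplicaRole.CONSUMER: 1},
                         DEFAULT_SUFFIX)
for serverid, inst in topo.ms.items():
    log.info('Created %s listening on port %s' % (serverid, inst.port))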
Example #34
def topology(request):
    '''
        This fixture is used to create a replicated topology for the 'module'.
        The replicated topology is MASTER1 <-> MASTER2.
    '''
    global installation1_prefix
    global installation2_prefix

    # Allocate master1 on a given deployment
    master1 = DirSrv(verbose=False)
    if installation1_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation1_prefix

    # Args for the master1 instance
    args_instance[SER_HOST] = HOST_MASTER_1
    args_instance[SER_PORT] = PORT_MASTER_1
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
    args_master = args_instance.copy()
    master1.allocate(args_master)

    # Allocate master2 on a given deployment
    master2 = DirSrv(verbose=False)
    if installation2_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation2_prefix

    # Args for the master2 instance
    args_instance[SER_HOST] = HOST_MASTER_2
    args_instance[SER_PORT] = PORT_MASTER_2
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
    args_master = args_instance.copy()
    master2.allocate(args_master)

    # Check whether the instances already exist
    instance_master1 = master1.exists()
    instance_master2 = master2.exists()

    # Remove all the instances
    if instance_master1:
        master1.delete()
    if instance_master2:
        master2.delete()

    # Create the instances
    master1.create()
    master1.open()
    master2.create()
    master2.open()

    #
    # Now prepare the Master-Master topology
    #
    # First Enable replication
    master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
    master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)

    # Create the agreements and initialize master1 -> master2

    properties = {RA_NAME:      r'meTo_$host:$port',
                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)

    if not repl_agreement:
        log.fatal("Fail to create a replica agreement")
        sys.exit(1)

    log.debug("%s created" % repl_agreement)

    properties = {RA_NAME:      r'meTo_$host:$port',
                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)

    master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
    master1.waitForReplInit(repl_agreement)

    # Check replication is working fine
    if master1.testReplication(DEFAULT_SUFFIX, master2):
        log.info('Replication is working.')
    else:
        log.fatal('Replication is not working.')
        assert False

    def fin():
        master1.delete()
        master2.delete()
    request.addfinalizer(fin)

    # Here we have two master instances with replication working.
    return TopologyMaster1Master2(master1, master2)
Example #35
def topology(request):
    global installation1_prefix
    if installation1_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation1_prefix

    # Creating master 1...
    master1 = DirSrv(verbose=False)
    if installation1_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
    args_instance[SER_HOST] = HOST_MASTER_1
    args_instance[SER_PORT] = PORT_MASTER_1
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_master = args_instance.copy()
    master1.allocate(args_master)
    instance_master1 = master1.exists()
    if instance_master1:
        master1.delete()
    master1.create()
    master1.open()
    master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)

    # Creating master 2...
    master2 = DirSrv(verbose=False)
    if installation1_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
    args_instance[SER_HOST] = HOST_MASTER_2
    args_instance[SER_PORT] = PORT_MASTER_2
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_master = args_instance.copy()
    master2.allocate(args_master)
    instance_master2 = master2.exists()
    if instance_master2:
        master2.delete()
    master2.create()
    master2.open()
    master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)

    #
    # Create all the agreements
    #
    # Creating agreement from master 1 to master 2
    properties = {RA_NAME:      r'meTo_%s:%s' % (master2.host, master2.port),
                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    global m1_m2_agmt
    m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
    if not m1_m2_agmt:
        log.fatal("Fail to create a master -> master replica agreement")
        sys.exit(1)
    log.debug("%s created" % m1_m2_agmt)

    # Creating agreement from master 2 to master 1
    properties = {RA_NAME:      r'meTo_%s:%s' % (master1.host, master1.port),
                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
    if not m2_m1_agmt:
        log.fatal("Fail to create a master -> master replica agreement")
        sys.exit(1)
    log.debug("%s created" % m2_m1_agmt)

    # Allow the replicas to get situated with the new agreements...
    time.sleep(5)

    #
    # Initialize all the agreements
    #
    master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
    master1.waitForReplInit(m1_m2_agmt)

    # Check replication is working...
    if master1.testReplication(DEFAULT_SUFFIX, master2):
        log.info('Replication is working.')
    else:
        log.fatal('Replication is not working.')
        assert False

    # Delete each instance in the end
    def fin():
        master1.delete()
        master2.delete()
    request.addfinalizer(fin)

    return TopologyReplication(master1, master2)
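The agreement DN is made global here so later tests can manipulate the master1 -> master2 agreement directly. A hedged sketch of that pattern, assuming TopologyReplication exposes .master1 and that lib389's Agreement class provides pause()/resume() taking an agreement DN:

def test_pause_and_resume(topology):
    # Hypothetical test: temporarily stop pushing updates from master1 to master2.
    topology.master1.agreement.pause(m1_m2_agmt)
    # ... make changes on master1 that must not replicate yet ...
    topology.master1.agreement.resume(m1_m2_agmt)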
Example #36
def topology(request):
    """
        This fixture is used to create a replicated topology for the 'module'.
        The replicated topology is MASTER -> Consumer.
    """
    master = DirSrv(verbose=False)
    consumer = DirSrv(verbose=False)

    # Args for the master instance
    args_instance[SER_HOST] = HOST_MASTER_1
    args_instance[SER_PORT] = PORT_MASTER_1
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
    args_master = args_instance.copy()
    master.allocate(args_master)

    # Args for the consumer instance
    args_instance[SER_HOST] = HOST_CONSUMER_1
    args_instance[SER_PORT] = PORT_CONSUMER_1
    args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
    args_consumer = args_instance.copy()
    consumer.allocate(args_consumer)

    # Check whether the instances already exist
    instance_master = master.exists()
    instance_consumer = consumer.exists()

    # Remove all the instances
    if instance_master:
        master.delete()
    if instance_consumer:
        consumer.delete()

    # Create the instances
    master.create()
    master.open()
    consumer.create()
    consumer.open()

    #
    # Now prepare the Master-Consumer topology
    #
    # First Enable replication
    master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
    consumer.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER)

    # Initialize the supplier->consumer

    properties = {
        RA_NAME: r"meTo_$host:$port",
        RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
        RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
        RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
        RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT],
    }
    repl_agreement = master.agreement.create(
        suffix=SUFFIX, host=consumer.host, port=consumer.port, properties=properties
    )

    if not repl_agreement:
        log.fatal("Fail to create a replica agreement")
        sys.exit(1)

    log.debug("%s created" % repl_agreement)
    master.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
    master.waitForReplInit(repl_agreement)

    # Check replication is working fine
    if master.testReplication(DEFAULT_SUFFIX, consumer):
        log.info("Replication is working.")
    else:
        log.fatal("Replication is not working.")
        assert False

    def fin():
        master.delete()
        consumer.delete()

    request.addfinalizer(fin)
    #
    # Here we have a master and a consumer with replication working,
    # whether restored from a backup or freshly (re)initialized.
    # Time to return the topology.
    return TopologyMasterConsumer(master, consumer)
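A module-level test only needs to accept the fixture as an argument. The sketch below is assumption-laden: it presumes TopologyMasterConsumer keeps the instances as .master and .consumer and that DirSrv.restart() accepts a timeout, as in lib389.

def test_repl_survives_consumer_restart(topology):
    # Hypothetical check: replication still works after the consumer restarts.
    topology.consumer.restart(timeout=10)
    assert topology.master.testReplication(DEFAULT_SUFFIX, topology.consumer)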
Example #37
def topology(request):
    global installation1_prefix
    if installation1_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation1_prefix

    # Creating supplier 1...
    supplier1 = DirSrv(verbose=DEBUGGING)
    args_instance[SER_HOST] = HOST_SUPPLIER_1
    args_instance[SER_PORT] = PORT_SUPPLIER_1
    args_instance[SER_SECURE_PORT] = SECUREPORT_SUPPLIER_1
    args_instance[SER_SERVERID_PROP] = SERVERID_SUPPLIER_1
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_supplier = args_instance.copy()
    supplier1.allocate(args_supplier)
    instance_supplier1 = supplier1.exists()
    if instance_supplier1:
        supplier1.delete()
    supplier1.create()
    supplier1.open()
    supplier1.replica.enableReplication(suffix=SUFFIX,
                                        role=ReplicaRole.SUPPLIER,
                                        replicaId=REPLICAID_SUPPLIER_1)

    # Creating supplier 2...
    supplier2 = DirSrv(verbose=DEBUGGING)
    args_instance[SER_HOST] = HOST_SUPPLIER_2
    args_instance[SER_PORT] = PORT_SUPPLIER_2
    args_instance[SER_SECURE_PORT] = SECUREPORT_SUPPLIER_2
    args_instance[SER_SERVERID_PROP] = SERVERID_SUPPLIER_2
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_supplier = args_instance.copy()
    supplier2.allocate(args_supplier)
    instance_supplier2 = supplier2.exists()
    if instance_supplier2:
        supplier2.delete()
    supplier2.create()
    supplier2.open()
    supplier2.replica.enableReplication(suffix=SUFFIX,
                                        role=ReplicaRole.SUPPLIER,
                                        replicaId=REPLICAID_SUPPLIER_2)

    #
    # Create all the agreements
    #
    # Creating agreement from supplier 1 to supplier 2
    properties = {
        RA_NAME: r'meTo_$host:$port',
        RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
        RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
        RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
        RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]
    }
    m1_m2_agmt = supplier1.agreement.create(suffix=SUFFIX,
                                            host=supplier2.host,
                                            port=supplier2.port,
                                            properties=properties)
    if not m1_m2_agmt:
        log.fatal("Fail to create a supplier -> supplier replica agreement")
        sys.exit(1)
    log.debug("%s created" % m1_m2_agmt)

    # Creating agreement from supplier 2 to supplier 1
    properties = {
        RA_NAME: r'meTo_$host:$port',
        RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
        RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
        RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
        RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]
    }
    m2_m1_agmt = supplier2.agreement.create(suffix=SUFFIX,
                                            host=supplier1.host,
                                            port=supplier1.port,
                                            properties=properties)
    if not m2_m1_agmt:
        log.fatal("Fail to create a supplier -> supplier replica agreement")
        sys.exit(1)
    log.debug("%s created" % m2_m1_agmt)

    # Allow the replicas to get situated with the new agreements...
    time.sleep(5)

    #
    # Import test entries into supplier1 before we initialize supplier2
    #
    ldif_dir = supplier1.get_ldif_dir()

    import_ldif = ldif_dir + '/rel7.5-entries.ldif'

    # First generate an ldif
    try:
        ldif = open(import_ldif, 'w')
    except IOError as e:
        log.fatal('Failed to create test ldif, error: %s - %s' %
                  (e.errno, e.strerror))
        assert False

    # Create the root node
    ldif.write('dn: ' + DEFAULT_SUFFIX + '\n')
    ldif.write('objectclass: top\n')
    ldif.write('objectclass: domain\n')
    ldif.write('dc: example\n')
    ldif.write('\n')

    # Create the entries
    idx = 0
    while idx < NUM_USERS:
        count = str(idx)
        ldif.write('dn: uid=supplier1_entry' + count + ',' + DEFAULT_SUFFIX +
                   '\n')
        ldif.write('objectclass: top\n')
        ldif.write('objectclass: person\n')
        ldif.write('objectclass: inetorgperson\n')
        ldif.write('objectclass: organizationalperson\n')
        ldif.write('uid: supplier1_entry' + count + '\n')
        ldif.write('cn: supplier1 entry' + count + '\n')
        ldif.write('givenname: supplier1 ' + count + '\n')
        ldif.write('sn: entry ' + count + '\n')
        ldif.write('userpassword: supplier1_entry' + count + '\n')
        ldif.write('description: ' + 'a' * random.randint(1, 1000) + '\n')
        ldif.write('\n')

        ldif.write('dn: uid=supplier2_entry' + count + ',' + DEFAULT_SUFFIX +
                   '\n')
        ldif.write('objectclass: top\n')
        ldif.write('objectclass: person\n')
        ldif.write('objectclass: inetorgperson\n')
        ldif.write('objectclass: organizationalperson\n')
        ldif.write('uid: supplier2_entry' + count + '\n')
        ldif.write('cn: supplier2 entry' + count + '\n')
        ldif.write('givenname: supplier2 ' + count + '\n')
        ldif.write('sn: entry ' + count + '\n')
        ldif.write('userpassword: supplier2_entry' + count + '\n')
        ldif.write('description: ' + 'a' * random.randint(1, 1000) + '\n')
        ldif.write('\n')
        idx += 1

    ldif.close()

    # Now import it
    try:
        supplier1.tasks.importLDIF(suffix=DEFAULT_SUFFIX,
                                   input_file=import_ldif,
                                   args={TASK_WAIT: True})
    except ValueError:
        log.fatal('test_reliab_7.5: Online import failed')
        assert False

    #
    # Initialize all the agreements
    #
    supplier1.agreement.init(SUFFIX, HOST_SUPPLIER_2, PORT_SUPPLIER_2)
    supplier1.waitForReplInit(m1_m2_agmt)

    # Check replication is working...
    if supplier1.testReplication(DEFAULT_SUFFIX, supplier2):
        log.info('Replication is working.')
    else:
        log.fatal('Replication is not working.')
        assert False

    # Clear out the tmp dir
    supplier1.clearTmpDir(__file__)

    # Delete each instance in the end
    def fin():
        supplier1.delete()
        supplier2.delete()
        if ENABLE_VALGRIND:
            sbin_dir = get_sbin_dir(prefix=supplier1.prefix)
            valgrind_disable(sbin_dir)

    request.addfinalizer(fin)

    return TopologyReplication(supplier1, supplier2)
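The hand-rolled LDIF generation above could also be written with python-ldap's ldif.LDIFWriter, which handles line folding and base64 encoding. A minimal sketch, assuming python-ldap 3 (attribute values passed as bytes) and the same NUM_USERS, DEFAULT_SUFFIX and import_ldif as in the fixture:

import ldif as ldif_module  # aliased so it does not clash with the 'ldif' file handle above

with open(import_ldif, 'w') as out:
    writer = ldif_module.LDIFWriter(out)
    # Root node
    writer.unparse(DEFAULT_SUFFIX, {'objectclass': [b'top', b'domain'],
                                    'dc': [b'example']})
    # One block of supplier1 entries; supplier2 entries would follow the same pattern
    for idx in range(NUM_USERS):
        uid = 'supplier1_entry%d' % idx
        writer.unparse('uid=%s,%s' % (uid, DEFAULT_SUFFIX),
                       {'objectclass': [b'top', b'person', b'inetorgperson',
                                        b'organizationalperson'],
                        'uid': [uid.encode()],
                        'cn': [('supplier1 entry %d' % idx).encode()],
                        'sn': [('entry %d' % idx).encode()]})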
Example #38
def topology(request):
    global installation1_prefix
    if installation1_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation1_prefix

    # Creating master 1...
    master1 = DirSrv(verbose=False)
    args_instance[SER_HOST] = HOST_MASTER_1
    args_instance[SER_PORT] = PORT_MASTER_1
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_master = args_instance.copy()
    master1.allocate(args_master)
    instance_master1 = master1.exists()
    if instance_master1:
        master1.delete()
    master1.create()
    master1.open()
    master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
                                      replicaId=REPLICAID_MASTER_1)

    # Creating hub 1...
    hub1 = DirSrv(verbose=False)
    args_instance[SER_HOST] = HOST_HUB_1
    args_instance[SER_PORT] = PORT_HUB_1
    args_instance[SER_SERVERID_PROP] = SERVERID_HUB_1
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_hub = args_instance.copy()
    hub1.allocate(args_hub)
    instance_hub1 = hub1.exists()
    if instance_hub1:
        hub1.delete()
    hub1.create()
    hub1.open()
    hub1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_HUB,
                                   replicaId=REPLICAID_HUB_1)

    # Creating consumer 1...
    consumer1 = DirSrv(verbose=False)
    args_instance[SER_HOST] = HOST_CONSUMER_1
    args_instance[SER_PORT] = PORT_CONSUMER_1
    args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_consumer = args_instance.copy()
    consumer1.allocate(args_consumer)
    instance_consumer1 = consumer1.exists()
    if instance_consumer1:
        consumer1.delete()
    consumer1.create()
    consumer1.open()
    consumer1.changelog.create()
    consumer1.replica.enableReplication(suffix=SUFFIX,
                                        role=REPLICAROLE_CONSUMER,
                                        replicaId=CONSUMER_REPLICAID)

    #
    # Create all the agreements
    #
    # Creating agreement from master 1 to hub 1
    properties = {RA_NAME: r'meTo_$host:$port',
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    m1_h1_agmt = master1.agreement.create(suffix=SUFFIX, host=hub1.host,
                                          port=hub1.port,
                                          properties=properties)
    if not m1_h1_agmt:
        log.fatal("Fail to create a master -> hub replica agreement")
        sys.exit(1)
    log.debug("%s created" % m1_h1_agmt)

    # Creating agreement from hub 1 to consumer 1
    properties = {RA_NAME: r'meTo_$host:$port',
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    h1_c1_agmt = hub1.agreement.create(suffix=SUFFIX, host=consumer1.host,
                                       port=consumer1.port,
                                       properties=properties)
    if not h1_c1_agmt:
        log.fatal("Fail to create a hub -> consumer replica agreement")
        sys.exit(1)
    log.debug("%s created" % h1_c1_agmt)

    # Allow the replicas to get situated with the new agreements...
    time.sleep(5)

    #
    # Initialize all the agreements
    #
    master1.agreement.init(SUFFIX, HOST_HUB_1, PORT_HUB_1)
    master1.waitForReplInit(m1_h1_agmt)
    hub1.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
    hub1.waitForReplInit(h1_c1_agmt)

    # Check replication is working...
    if master1.testReplication(DEFAULT_SUFFIX, consumer1):
        log.info('Replication is working.')
    else:
        log.fatal('Replication is not working.')
        assert False

    # Delete each instance in the end
    def fin():
        master1.delete()
        hub1.delete()
        consumer1.delete()

    request.addfinalizer(fin)

    # Clear out the tmp dir
    master1.clearTmpDir(__file__)

    return TopologyReplication(master1, hub1, consumer1)
Example #39
class Test_dirsrv():
    def _add_user(self, success=True):
        try:
            self.instance.add_s(Entry((TEST_REPL_DN, {
                'objectclass': 'top person organizationalPerson inetOrgPerson'.split(),
                'uid': 'test',
                'sn': 'test',
                'cn': 'test'})))
        except Exception as e:
            if success:
                raise
            else:
                self.instance.log.info('Fail to add (expected): %s' % e.args)
                return

        self.instance.log.info('%s added' % TEST_REPL_DN)

    def _mod_user(self, success=True):
        try:
            replace = [(ldap.MOD_REPLACE, 'description', str(randint(1, 100)))]
            self.instance.modify_s(TEST_REPL_DN, replace)
        except Exception as e:
            if success:
                raise
            else:
                self.instance.log.info('Fail to modify (expected): %s' %
                                       e.args)
                return

        self.instance.log.info('%s modified' % TEST_REPL_DN)

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_allocate(self, verbose=False):
        instance = DirSrv(verbose=verbose)
        instance.log.debug("Instance allocated")
        assert instance.state == DIRSRV_STATE_INIT

        # Allocate the instance
        args = {
            SER_HOST: LOCALHOST,
            SER_PORT: INSTANCE_PORT,
            SER_DEPLOYED_DIR: INSTANCE_PREFIX,
            SER_SERVERID_PROP: INSTANCE_SERVERID
        }
        instance.allocate(args)

        userid = pwd.getpwuid(os.getuid())[0]

        # Now verify the settings
        assert instance.state == DIRSRV_STATE_ALLOCATED
        assert instance.host == LOCALHOST
        assert instance.port == INSTANCE_PORT
        assert instance.sslport is None
        assert instance.binddn == DN_DM
        assert instance.bindpw == PW_DM
        assert instance.creation_suffix == DEFAULT_SUFFIX
        assert instance.userid == userid
        assert instance.serverid == INSTANCE_SERVERID
        assert instance.groupid == instance.userid
        assert instance.prefix == INSTANCE_PREFIX
        assert instance.backupdir == INSTANCE_BACKUP

        # Now check we can change the settings of an allocated DirSrv
        args = {
            SER_SERVERID_PROP: INSTANCE_SERVERID,
            SER_HOST: LOCALHOST,
            SER_PORT: INSTANCE_PORT,
            SER_DEPLOYED_DIR: INSTANCE_PREFIX,
            SER_ROOT_DN: "uid=foo"
        }
        instance.allocate(args)
        assert instance.state == DIRSRV_STATE_ALLOCATED
        assert instance.host == LOCALHOST
        assert instance.port == INSTANCE_PORT
        assert instance.sslport is None
        assert instance.binddn == "uid=foo"
        assert instance.bindpw == PW_DM
        assert instance.creation_suffix == DEFAULT_SUFFIX
        assert instance.userid == userid
        assert instance.serverid == INSTANCE_SERVERID
        assert instance.groupid == instance.userid
        assert instance.prefix == INSTANCE_PREFIX
        assert instance.backupdir == INSTANCE_BACKUP

        # OK restore back the valid parameters
        args = {
            SER_SERVERID_PROP: INSTANCE_SERVERID,
            SER_HOST: LOCALHOST,
            SER_PORT: INSTANCE_PORT,
            SER_DEPLOYED_DIR: INSTANCE_PREFIX
        }
        instance.allocate(args)
        assert instance.state == DIRSRV_STATE_ALLOCATED
        assert instance.host == LOCALHOST
        assert instance.port == INSTANCE_PORT
        assert instance.sslport is None
        assert instance.binddn == DN_DM
        assert instance.bindpw == PW_DM
        assert instance.creation_suffix == DEFAULT_SUFFIX
        assert instance.userid == userid
        assert instance.serverid == INSTANCE_SERVERID
        assert instance.groupid == instance.userid
        assert instance.prefix == INSTANCE_PREFIX
        assert instance.backupdir == INSTANCE_BACKUP

        self.instance = instance

    def test_list_init(self):
        '''
            Lists the instances on the file system
        '''
        for properties in self.instance.list():
            self.instance.log.info("properties: %r" % properties)

        for properties in self.instance.list(all=True):
            self.instance.log.info("properties (all): %r" % properties)

    def test_allocated_to_offline(self):
        self.instance.create()

    def test_offline_to_online(self):
        self.instance.open()

    def test_online_to_offline(self):
        self.instance.close()

    def test_offline_to_allocated(self):
        self.instance.delete()

    def test_allocated_to_online(self, verbose=False):
        # Here the instance was already created; check that we can connect
        # to it without creating it (especially without a serverid value)
        # Allocate the instance
        args = {
            SER_HOST: LOCALHOST,
            SER_PORT: INSTANCE_PORT,
            SER_DEPLOYED_DIR: INSTANCE_PREFIX,
            SER_SERVERID_PROP: INSTANCE_SERVERID
        }
        self.instance.log.info("test_allocated_to_online: Create an instance")
        self.instance = DirSrv(verbose=verbose)
        assert not hasattr(self, 'serverid')
        self.instance.allocate(args)
        self.instance.create()
        self.instance.open()
        assert self.instance.serverid is not None

        # The instance is created; allocate a new DirSrv
        self.instance.log.info("test_allocated_to_online: instance New")
        self.instance = DirSrv(verbose=verbose)
        assert not hasattr(self, 'serverid')
        assert self.instance.state == DIRSRV_STATE_INIT

        args = {
            SER_HOST: LOCALHOST,
            SER_PORT: INSTANCE_PORT,
            SER_DEPLOYED_DIR: INSTANCE_PREFIX,
        }
        self.instance.allocate(args)
        self.instance.log.info("test_allocated_to_online: instance Allocated")
        assert self.instance.serverid is None
        assert self.instance.state == DIRSRV_STATE_ALLOCATED

        self.instance.open()
        self.instance.log.info("test_allocated_to_online: instance online")
        assert self.instance.serverid is not None
        assert self.instance.serverid == self.instance.inst
        assert self.instance.state == DIRSRV_STATE_ONLINE
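The class above effectively walks the DirSrv state machine one test at a time. Condensed into a linear sketch using the same constants (only the INIT, ALLOCATED and ONLINE states are asserted above, so the intermediate comments are descriptive rather than exact state names):

# Sketch of the lifecycle the tests above exercise.
instance = DirSrv(verbose=False)                            # DIRSRV_STATE_INIT
instance.allocate({SER_HOST: LOCALHOST,
                   SER_PORT: INSTANCE_PORT,
                   SER_SERVERID_PROP: INSTANCE_SERVERID})   # DIRSRV_STATE_ALLOCATED
instance.create()   # instance exists on disk but is not yet opened
instance.open()     # DIRSRV_STATE_ONLINE; LDAP connection established
instance.close()    # connection dropped, instance still installed
instance.delete()   # instance removed, object back to an allocated state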
Example #40
def topology(request):
    global installation1_prefix
    if installation1_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation1_prefix

    # Creating master 1...
    master1 = DirSrv(verbose=False)
    args_instance[SER_HOST] = HOST_MASTER_1
    args_instance[SER_PORT] = PORT_MASTER_1
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_master = args_instance.copy()
    master1.allocate(args_master)
    instance_master1 = master1.exists()
    if instance_master1:
        master1.delete()
    master1.create()
    master1.open()
    master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
                                      replicaId=REPLICAID_MASTER_1)

    # Creating master 2...
    master2 = DirSrv(verbose=False)
    args_instance[SER_HOST] = HOST_MASTER_2
    args_instance[SER_PORT] = PORT_MASTER_2
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    args_master = args_instance.copy()
    master2.allocate(args_master)
    instance_master2 = master2.exists()
    if instance_master2:
        master2.delete()
    master2.create()
    master2.open()
    master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
                                      replicaId=REPLICAID_MASTER_2)

    #
    # Create all the agreements
    #
    # Creating agreement from master 1 to master 2
    properties = {RA_NAME: r'meTo_$host:$port',
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
                                          port=master2.port,
                                          properties=properties)
    if not m1_m2_agmt:
        log.fatal("Fail to create a master -> master replica agreement")
        sys.exit(1)
    log.debug("%s created" % m1_m2_agmt)

    # Creating agreement from master 2 to master 1
    properties = {RA_NAME: r'meTo_$host:$port',
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
                                          port=master1.port,
                                          properties=properties)
    if not m2_m1_agmt:
        log.fatal("Fail to create a master -> master replica agreement")
        sys.exit(1)
    log.debug("%s created" % m2_m1_agmt)

    # Allow the replicas to get situated with the new agreements...
    time.sleep(5)

    #
    # Import test entries into master1 before we initialize master2
    #
    ldif_dir = master1.get_ldif_dir()

    import_ldif = ldif_dir + '/rel7.5-entries.ldif'

    # First generate an ldif
    try:
        ldif = open(import_ldif, 'w')
    except IOError as e:
        log.fatal('Failed to create test ldif, error: %s - %s' %
                  (e.errno, e.strerror))
        assert False

    # Create the root node
    ldif.write('dn: ' + DEFAULT_SUFFIX + '\n')
    ldif.write('objectclass: top\n')
    ldif.write('objectclass: domain\n')
    ldif.write('dc: example\n')
    ldif.write('\n')

    # Create the entries
    idx = 0
    while idx < NUM_USERS:
        count = str(idx)
        ldif.write('dn: uid=master1_entry' + count + ',' +
                   DEFAULT_SUFFIX + '\n')
        ldif.write('objectclass: top\n')
        ldif.write('objectclass: person\n')
        ldif.write('objectclass: inetorgperson\n')
        ldif.write('objectclass: organizationalperson\n')
        ldif.write('uid: master1_entry' + count + '\n')
        ldif.write('cn: master1 entry' + count + '\n')
        ldif.write('givenname: master1 ' + count + '\n')
        ldif.write('sn: entry ' + count + '\n')
        ldif.write('userpassword: master1_entry' + count + '\n')
        ldif.write('description: ' + 'a' * random.randint(1, 1000) + '\n')
        ldif.write('\n')

        ldif.write('dn: uid=master2_entry' + count + ',' +
                   DEFAULT_SUFFIX + '\n')
        ldif.write('objectclass: top\n')
        ldif.write('objectclass: person\n')
        ldif.write('objectclass: inetorgperson\n')
        ldif.write('objectclass: organizationalperson\n')
        ldif.write('uid: master2_entry' + count + '\n')
        ldif.write('cn: master2 entry' + count + '\n')
        ldif.write('givenname: master2 ' + count + '\n')
        ldif.write('sn: entry ' + count + '\n')
        ldif.write('userpassword: master2_entry' + count + '\n')
        ldif.write('description: ' + 'a' * random.randint(1, 1000) + '\n')
        ldif.write('\n')
        idx += 1

    ldif.close()

    # Now import it
    try:
        master1.tasks.importLDIF(suffix=DEFAULT_SUFFIX, input_file=import_ldif,
                                 args={TASK_WAIT: True})
    except ValueError:
        log.fatal('test_reliab_7.5: Online import failed')
        assert False

    #
    # Initialize all the agreements
    #
    master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
    master1.waitForReplInit(m1_m2_agmt)

    # Check replication is working...
    if master1.testReplication(DEFAULT_SUFFIX, master2):
        log.info('Replication is working.')
    else:
        log.fatal('Replication is not working.')
        assert False

    # Clear out the tmp dir
    master1.clearTmpDir(__file__)

    # Delete each instance in the end
    def fin():
        master1.delete()
        master2.delete()
        if ENABLE_VALGRIND:
            sbin_dir = get_sbin_dir(prefix=master1.prefix)
            valgrind_disable(sbin_dir)
    request.addfinalizer(fin)

    return TopologyReplication(master1, master2)
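The fin() above only turns the valgrind wrapper off; a run that wants valgrind output would normally enable it before the tests execute. A hedged sketch of that pairing, assuming lib389 offers a valgrind_enable counterpart to the valgrind_disable used above:

# Hypothetical setup step, mirroring the teardown in fin():
if ENABLE_VALGRIND:
    sbin_dir = get_sbin_dir(prefix=master1.prefix)
    valgrind_enable(sbin_dir)   # wrap ns-slapd with valgrind
    master1.restart()           # restart so the wrapper takes effect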