Beispiel #1
0
    def test_basic_xdcr_with_cert_regenerate(self):
        """Set up two 2-node SSL clusters with x509 certificates, start an
        XDCR replication between them, regenerate the root certificate on
        every node, and verify the replication still honors pause requests.
        """
        cluster1 = self.servers[0:2]
        cluster2 = self.servers[2:4]
        remote_cluster_name = 'sslcluster'
        restCluster1 = RestConnection(cluster1[0])
        restCluster2 = RestConnection(cluster2[0])

        try:
            # Setup cluster1: deploy certificates, then add the second node
            # and rebalance it in.
            x509main(cluster1[0]).setup_master()
            x509main(cluster1[1])._setup_node_certificates(reload_cert=False)

            restCluster1.add_node('Administrator', 'password', cluster1[1].ip)
            known_nodes = ['ns_1@' + cluster1[0].ip, 'ns_1@' + cluster1[1].ip]
            restCluster1.rebalance(known_nodes)
            self.assertTrue(self.check_rebalance_complete(restCluster1),
                            "Issue with rebalance")
            restCluster1.create_bucket(bucket='default', ramQuotaMB=100)
            # Start from a clean XDCR state on the source cluster.
            restCluster1.remove_all_replications()
            restCluster1.remove_all_remote_clusters()

            # Setup cluster2 the same way.
            x509main(cluster2[0]).setup_master()
            x509main(cluster2[1])._setup_node_certificates(reload_cert=False)

            restCluster2.add_node('Administrator', 'password', cluster2[1].ip)
            known_nodes = ['ns_1@' + cluster2[0].ip, 'ns_1@' + cluster2[1].ip]
            restCluster2.rebalance(known_nodes)
            self.assertTrue(self.check_rebalance_complete(restCluster2),
                            "Issue with rebalance")
            restCluster2.create_bucket(bucket='default', ramQuotaMB=100)

            # Create the remote-cluster reference with the current root CA
            # certificate, then start a continuous replication.
            cert_path = x509main.CACERTFILEPATH + x509main.CACERTFILE
            # Context manager instead of a leaked file handle.
            with open(cert_path, 'rb') as cert_file:
                data = cert_file.read()
            restCluster1.add_remote_cluster(cluster2[0].ip, cluster2[0].port,
                                            'Administrator', 'password',
                                            remote_cluster_name,
                                            certificate=data)
            replication_id = restCluster1.start_replication(
                'continuous', 'default', remote_cluster_name)

            # Regenerate the root certificate and redeploy it on both clusters.
            # Raw string: the original "CB\ Authority" relied on an invalid
            # escape sequence that Python only tolerates with a warning.
            x509main(self.master)._delete_inbox_folder()
            x509main(self.master)._generate_cert(self.servers,
                                                 root_cn=r"CB\ Authority")
            self.log.info("Setting up the first cluster for new certificate")

            x509main(cluster1[0]).setup_master()
            x509main(cluster1[1])._setup_node_certificates(reload_cert=False)
            self.log.info("Setting up the second cluster for new certificate")
            x509main(cluster2[0]).setup_master()
            x509main(cluster2[1])._setup_node_certificates(reload_cert=False)

            # The replication must still respond to pause requests after the
            # certificate swap.
            # NOTE(review): setting pauseRequested=False when the replication
            # is already running looks like a no-op — confirm intent.
            status = restCluster1.is_replication_paused('default', 'default')
            if not status:
                restCluster1.set_xdcr_param('default', 'default',
                                            'pauseRequested', False)

            restCluster1.set_xdcr_param('default', 'default',
                                        'pauseRequested', True)
            status = restCluster1.is_replication_paused('default', 'default')
            self.assertTrue(status,
                            "Replication has not started after certificate upgrade")
        finally:
            # Always rebalance the second node out of cluster2 and drop the
            # bucket so later tests start clean.
            known_nodes = ['ns_1@' + cluster2[0].ip, 'ns_1@' + cluster2[1].ip]
            restCluster2.rebalance(known_nodes, ['ns_1@' + cluster2[1].ip])
            self.assertTrue(self.check_rebalance_complete(restCluster2),
                            "Issue with rebalance")
            restCluster2.delete_bucket()
Beispiel #2
0
    def test_basic_xdcr_with_cert_regenerate(self):
        """Set up two 2-node SSL clusters with x509 certificates, start an
        XDCR replication between them, regenerate the root certificate on
        every node, and verify the replication still honors pause requests.
        """
        cluster1 = self.servers[0:2]
        cluster2 = self.servers[2:4]
        remote_cluster_name = 'sslcluster'
        restCluster1 = RestConnection(cluster1[0])
        restCluster2 = RestConnection(cluster2[0])

        try:
            # Setup cluster1: deploy certificates, then add the second node
            # and rebalance it in.
            x509main(cluster1[0]).setup_master()
            x509main(cluster1[1])._setup_node_certificates(reload_cert=False)

            restCluster1.add_node('Administrator', 'password', cluster1[1].ip)
            known_nodes = ['ns_1@' + cluster1[0].ip, 'ns_1@' + cluster1[1].ip]
            restCluster1.rebalance(known_nodes)
            self.assertTrue(self.check_rebalance_complete(restCluster1),
                            "Issue with rebalance")
            restCluster1.create_bucket(bucket='default', ramQuotaMB=100)
            # Start from a clean XDCR state on the source cluster.
            restCluster1.remove_all_replications()
            restCluster1.remove_all_remote_clusters()

            # Setup cluster2 the same way.
            x509main(cluster2[0]).setup_master()
            x509main(cluster2[1])._setup_node_certificates(reload_cert=False)

            restCluster2.add_node('Administrator', 'password', cluster2[1].ip)
            known_nodes = ['ns_1@' + cluster2[0].ip, 'ns_1@' + cluster2[1].ip]
            restCluster2.rebalance(known_nodes)
            self.assertTrue(self.check_rebalance_complete(restCluster2),
                            "Issue with rebalance")
            restCluster2.create_bucket(bucket='default', ramQuotaMB=100)

            # Create the remote-cluster reference with the current root CA
            # certificate, then start a continuous replication.
            cert_path = x509main.CACERTFILEPATH + x509main.CACERTFILE
            # Context manager instead of a leaked file handle.
            with open(cert_path, 'rb') as cert_file:
                data = cert_file.read()
            restCluster1.add_remote_cluster(cluster2[0].ip, cluster2[0].port,
                                            'Administrator', 'password',
                                            remote_cluster_name,
                                            certificate=data)
            replication_id = restCluster1.start_replication(
                'continuous', 'default', remote_cluster_name)

            # Regenerate the root certificate and redeploy it on both clusters.
            # Raw string: the original "CB\ Authority" relied on an invalid
            # escape sequence that Python only tolerates with a warning.
            x509main(self.master)._delete_inbox_folder()
            x509main(self.master)._generate_cert(self.servers,
                                                 root_cn=r"CB\ Authority")
            self.log.info("Setting up the first cluster for new certificate")

            x509main(cluster1[0]).setup_master()
            x509main(cluster1[1])._setup_node_certificates(reload_cert=False)
            self.log.info("Setting up the second cluster for new certificate")
            x509main(cluster2[0]).setup_master()
            x509main(cluster2[1])._setup_node_certificates(reload_cert=False)

            # The replication must still respond to pause requests after the
            # certificate swap.
            # NOTE(review): setting pauseRequested=False when the replication
            # is already running looks like a no-op — confirm intent.
            status = restCluster1.is_replication_paused('default', 'default')
            if not status:
                restCluster1.set_xdcr_param('default', 'default',
                                            'pauseRequested', False)

            restCluster1.set_xdcr_param('default', 'default',
                                        'pauseRequested', True)
            status = restCluster1.is_replication_paused('default', 'default')
            self.assertTrue(status,
                            "Replication has not started after certificate upgrade")
        finally:
            # Always rebalance the second node out of cluster2 and drop the
            # bucket so later tests start clean.
            known_nodes = ['ns_1@' + cluster2[0].ip, 'ns_1@' + cluster2[1].ip]
            restCluster2.rebalance(known_nodes, ['ns_1@' + cluster2[1].ip])
            self.assertTrue(self.check_rebalance_complete(restCluster2),
                            "Issue with rebalance")
            restCluster2.delete_bucket()
    def cluster_xdcr_remote_clusters_read(self,username,password,host,port=8091, servers=None,cluster=None,httpCode=None,user_role=None):
        """Exercise the read-only remote-clusters endpoint
        (GET /pools/default/remoteClusters) as *username*/*password* and check
        the HTTP status via self._return_http_code.
        """
        ref_name = 'rbac_cluster'
        source_rest = RestConnection(servers[0])
        target_node_1 = servers[1]
        target_node_2 = servers[2]
        target_rest_1 = RestConnection(target_node_1)
        target_rest_1.delete_bucket()
        target_rest_1.create_bucket(bucket='default', ramQuotaMB=100)
        target_rest_2 = RestConnection(target_node_2)

        # ------ First test the GET requests for XDCR ------ #

        # Drop any leftover replications and remote-cluster references so the
        # endpoint returns only what this test creates.
        source_rest.remove_all_replications()
        source_rest.remove_all_remote_clusters()

        # Create one remote-cluster reference plus a replication so the GET
        # below has something to return.
        source_rest.add_remote_cluster(target_node_1.ip, 8091, 'Administrator', 'password', ref_name)
        time.sleep(20)
        replication_id = source_rest.start_replication('continuous', 'default', ref_name)

        endpoints = {
            "remove_cluser_read": "/pools/default/remoteClusters;GET",
        }

        result = self._return_http_code(endpoints, username, password, host=host, port=8091, httpCode=httpCode, user_role=user_role)
Beispiel #4
0
    def start_replication(self, master, slave, replication_type='continuous',
                          buckets=None, bidir=False, suffix='A'):
        """Add a remote-cluster reference on *master* pointing at *slave*
        and start one replication per bucket.

        When more than one bucket is configured, or when *bidir* is true,
        the method recurses once with master/slave swapped to create the
        reverse direction (suffix 'B').
        """

        master_rest_conn = RestConnection(master)
        remote_reference = 'remote_cluster_' + suffix

        master_rest_conn.add_remote_cluster(slave.ip, slave.port,
                                            slave.rest_username,
                                            slave.rest_password,
                                            remote_reference)

        # NOTE(review): an explicitly passed *buckets* argument is discarded
        # here and replaced by get_buckets(reversed=True) — presumably meant
        # for the reverse ('B') leg; confirm callers expect this.
        if not buckets:
            buckets = self.get_buckets()
        else:
            buckets = self.get_buckets(reversed=True)

        for bucket in buckets:
            master_rest_conn.start_replication(replication_type, bucket,
                                               remote_reference)

        # NOTE(review): with num_buckets > 1 AND bidir=True, both recursive
        # calls below fire, creating the 'B' leg twice — verify intent.
        if self.parami('num_buckets', 1) > 1 and suffix == 'A':
            self.start_replication(slave, master, replication_type, buckets,
                                   suffix='B')

        if bidir:
            self.start_replication(slave, master, replication_type, buckets,
                                   suffix='B')
Beispiel #5
0
 def _link_clusters(self, src_cluster_name, src_master, dest_cluster_name,
                    dest_master):
     """Register *dest_master*'s cluster as remote cluster
     *dest_cluster_name* on the cluster owned by *src_master*."""
     src_rest = RestConnection(src_master)
     src_rest.add_remote_cluster(dest_master.ip,
                                 dest_master.port,
                                 dest_master.rest_username,
                                 dest_master.rest_password,
                                 dest_cluster_name)
Beispiel #6
0
 def _link_create_replications(self, master_1, master_2, cluster_name):
     """Point master_1 at master_2 as remote cluster *cluster_name* (using
     master_1's REST credentials) and start one continuous replication for
     every known bucket."""
     conn = RestConnection(master_1)
     conn.add_remote_cluster(master_2.ip, master_2.port,
                             master_1.rest_username, master_1.rest_password,
                             cluster_name)
     # Give the remote reference time to settle before creating replications.
     time.sleep(30)
     if len(self._buckets) == 0:
         self._buckets = conn.get_buckets()
     for bucket in set(self._buckets):
         rep_id = conn.start_replication("continuous", bucket, cluster_name)
 def _link_create_replications(self, master_1, master_2, cluster_name):
     """Point master_1 at master_2 as remote cluster *cluster_name* and start
     one continuous replication per bucket.  This variant expects
     start_replication to return a (database, id) pair."""
     rest_conn = RestConnection(master_1)
     rest_conn.add_remote_cluster(master_2.ip, master_2.port,
                                  master_1.rest_username,
                                  master_1.rest_password,
                                  cluster_name)
     # Allow the remote-cluster reference to become usable.
     time.sleep(30)
     if len(self._buckets) == 0:
         self._buckets = rest_conn.get_buckets()
     for bucket in set(self._buckets):
         rep_database, rep_id = rest_conn.start_replication("continuous",
                                                            bucket,
                                                            cluster_name)
Beispiel #8
0
    def test_continuous_unidirectional_deletes_2(self):
        """Load and delete keys on cluster A *before* replication starts,
        then replicate A -> B and verify the deletes are propagated for keys
        that never existed on B.
        """
        cluster_ref_a = "cluster_ref_a"
        master_a = self._input.clusters.get(0)[0]
        rest_conn_a = RestConnection(master_a)

        cluster_ref_b = "cluster_ref_b"
        master_b = self._input.clusters.get(1)[0]
        rest_conn_b = RestConnection(master_b)

        # Load some data on cluster a. Do it a few times so that the seqnos are
        # bumped up and then delete it.
        kvstore = ClientKeyValueStore()
        self._params["ops"] = "set"
        load_thread_list = []
        # Three passes over the same keys to raise their sequence numbers.
        for i in [1, 2, 3]:
            task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
            load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
                                                            self._buckets[0],
                                                            task_def, kvstore)
            load_thread_list.append(load_thread)

        # Start all loaders, then wait for all of them to finish.
        for lt in load_thread_list:
            lt.start()
        for lt in load_thread_list:
            lt.join()
        time.sleep(10)

        # Now delete everything that was just loaded.
        self._params["ops"] = "delete"
        task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
        load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
                                                        self._buckets[0],
                                                        task_def, kvstore)
        load_thread.start()
        load_thread.join()

        # Start replication to replicate the deletes from cluster a
        # to cluster b where the keys never existed.
        replication_type = "continuous"
        rest_conn_a.add_remote_cluster(master_b.ip, master_b.port,
                                       master_b.rest_username,
                                       master_b.rest_password, cluster_ref_b)
        (rep_database, rep_id) = rest_conn_a.start_replication(replication_type,
                                                               self._buckets[0],
                                                               cluster_ref_b)
        # Remember the replication so teardown can clean it up.
        self._state.append((rest_conn_a, cluster_ref_b, rep_database, rep_id))

        time.sleep(15)

        # Verify the deletes were replicated.
        self.assertTrue(XDCRBaseTest.verify_del_items(rest_conn_a,
                                                      rest_conn_b,
                                                      self._buckets[0],
                                                      kvstore.keys(),
                                                      self._poll_sleep,
                                                      self._poll_timeout),
                        "Changes feed verification failed")
    def cluster_bucket_xdcr_write(self,username,password,host,port=8091,servers=None,cluster=None,httpCode=None,user_role=None):
        """Exercise the XDCR write endpoints (createReplication and
        cancelXDCR via POST and DELETE) as *username* and verify the HTTP
        status codes via self._return_http_code.
        """
        # NOTE(review): this endpoint map is never passed to
        # _return_http_code — the request dicts are rebuilt below with
        # concrete parameters; kept for reference.
        _cluster_xdcr_settings_read = {
            "create_replication":"controller/createReplication;POST",
            "cancel_XDCR":"controller/cancelXDCR/<xid>;POST",
            "delete_XDCR":"controller/cancelXDCR/<xid>;DELETE"
        }

        # Start from a clean XDCR state.
        rest = RestConnection(servers[0])
        rest.remove_all_replications()
        rest.remove_all_remote_clusters()

        remote_cluster_name = 'rbac_cluster'
        rest = RestConnection(servers[0])
        remote_server01 = servers[1]
        remote_server02 = servers[2]
        rest_remote01 = RestConnection(remote_server01)
        # Recreate the target bucket on the remote node.
        rest_remote01.delete_bucket()
        rest_remote01.create_bucket(bucket='default1', ramQuotaMB=100,proxyPort=11252)
        rest_remote02 = RestConnection(remote_server02)
        remote_id = rest.add_remote_cluster(remote_server01.ip,8091,'Administrator','password',remote_cluster_name)
        time.sleep(10)

        # 1) createReplication via POST with explicit parameters.
        param_map = {'replicationType': 'continuous','toBucket': 'default1','fromBucket': 'default','toCluster': remote_cluster_name,
                     'type': 'capi'}
        create_replication = {"create_replication":"controller/createReplication;POST;"+str(param_map)}
        result = self._return_http_code(create_replication,username,password,host=host,port=port, httpCode=httpCode, user_role=user_role)


        # 2) cancelXDCR via POST: recreate the reference and a replication,
        # then cancel it.  The replication id must be URL-encoded.
        rest.remove_all_replications()
        rest.remove_all_remote_clusters()

        remote_id = rest.add_remote_cluster(remote_server01.ip,8091,'Administrator','password',remote_cluster_name)
        time.sleep(20)
        replication_id = rest.start_replication('continuous',fromBucket='default',toCluster=remote_cluster_name,toBucket='default1')
        replication_id = replication_id.replace("/","%2F")

        cancel_replication = {"cancel_XDCR":"controller/cancelXDCR/" + replication_id + ";POST"}
        result = self._return_http_code(cancel_replication,username,password,host=host,port=port, httpCode=httpCode, user_role=user_role)

        # 3) cancelXDCR via DELETE: same setup, different HTTP verb.
        rest.remove_all_replications()
        rest.remove_all_remote_clusters()
        remote_id = rest.add_remote_cluster(remote_server01.ip,8091,'Administrator','password',remote_cluster_name)
        time.sleep(20)
        replication_id = rest.start_replication('continuous',fromBucket='default',toCluster=remote_cluster_name,toBucket='default1')
        replication_id = replication_id.replace("/","%2F")

        cancel_replication = {"cancel_XDCR":"controller/cancelXDCR/" + replication_id + ";DELETE"}
        result = self._return_http_code(cancel_replication,username,password,host=host,port=port, httpCode=httpCode, user_role=user_role)

        # Clean up everything this test created.
        rest.remove_all_replications()
        rest.remove_all_remote_clusters()
        rest_remote01.delete_bucket('default1')
Beispiel #10
0
    def test_continuous_unidirectional_sets_deletes(self):
        """Replicate continuously from cluster A to cluster B, load items,
        delete a fifth of them, then verify both replicated data and
        replicated revisions on B.
        """
        # Fixed: the original assigned this string to cluster_ref_b and then
        # immediately overwrote it; sibling tests name it cluster_ref_a.
        cluster_ref_a = "cluster_ref_a"
        master_a = self._input.clusters.get(0)[0]
        rest_conn_a = RestConnection(master_a)

        cluster_ref_b = "cluster_ref_b"
        master_b = self._input.clusters.get(1)[0]
        rest_conn_b = RestConnection(master_b)

        # Start replication from A to B.
        replication_type = "continuous"
        rest_conn_a.add_remote_cluster(master_b.ip, master_b.port,
                                       master_b.rest_username,
                                       master_b.rest_password, cluster_ref_b)
        (rep_database, rep_id) = rest_conn_a.start_replication(replication_type,
                                                               self._buckets[0],
                                                               cluster_ref_b)
        # Remember the replication so teardown can clean it up.
        self._state.append((rest_conn_a, cluster_ref_b, rep_database, rep_id))

        # Load items onto cluster A while replication is running.
        kvstore = ClientKeyValueStore()
        self._params["ops"] = "set"
        task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
        load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
                                                        self._buckets[0],
                                                        task_def, kvstore)
        load_thread.start()
        load_thread.join()

        # Delete a fifth of the items.  Integer division: the original
        # self._num_items/5 produced a float count under Python 3.
        self._params["ops"] = "delete"
        self._params["count"] = self._num_items // 5
        task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
        load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
                                                        self._buckets[0],
                                                        task_def, kvstore)
        load_thread.start()
        load_thread.join()

        # Verify both the surviving data and the revisions on cluster B.
        self.assertTrue(XDCRBaseTest.verify_replicated_data(rest_conn_b,
                                                            self._buckets[0],
                                                            kvstore,
                                                            self._poll_sleep,
                                                            self._poll_timeout),
                        "Verification of replicated data failed")
        self.assertTrue(XDCRBaseTest.verify_replicated_revs(rest_conn_a,
                                                            rest_conn_b,
                                                            self._buckets[0],
                                                            self._poll_sleep,
                                                            self._poll_timeout),
                        "Verification of replicated revisions failed")
Beispiel #11
0
    def test_basic_xdcr_with_cert(self):
        """Set up two 2-node SSL clusters with x509 certificates and verify
        that an XDCR replication between them can be created.
        """
        cluster1 = self.servers[0:2]
        cluster2 = self.servers[2:4]
        remote_cluster_name = 'sslcluster'
        restCluster1 = RestConnection(cluster1[0])
        restCluster2 = RestConnection(cluster2[0])

        try:
            #Setup cluster1
            x509main(cluster1[0]).setup_master()
            x509main(cluster1[1])._setup_node_certificates(reload_cert=False)

            restCluster1.add_node('Administrator', 'password', cluster1[1].ip)
            known_nodes = ['ns_1@' + cluster1[0].ip, 'ns_1@' + cluster1[1].ip]
            restCluster1.rebalance(known_nodes)
            self.assertTrue(self.check_rebalance_complete(restCluster1),
                            "Issue with rebalance")
            restCluster1.create_bucket(bucket='default', ramQuotaMB=100)
            # Start from a clean XDCR state on the source cluster.
            restCluster1.remove_all_replications()
            restCluster1.remove_all_remote_clusters()

            #Setup cluster2
            x509main(cluster2[0]).setup_master()
            x509main(cluster2[1])._setup_node_certificates(reload_cert=False)

            restCluster2.add_node('Administrator', 'password', cluster2[1].ip)
            known_nodes = ['ns_1@' + cluster2[0].ip, 'ns_1@' + cluster2[1].ip]
            restCluster2.rebalance(known_nodes)
            self.assertTrue(self.check_rebalance_complete(restCluster2),
                            "Issue with rebalance")
            restCluster2.create_bucket(bucket='default', ramQuotaMB=100)

            # Create the remote-cluster reference with the root CA
            # certificate and start the replication.
            cert_path = x509main.CACERTFILEPATH + x509main.CACERTFILE
            with open(cert_path, 'rb') as cert_file:
                data = cert_file.read()
            restCluster1.add_remote_cluster(cluster2[0].ip,
                                            cluster2[0].port,
                                            'Administrator',
                                            'password',
                                            remote_cluster_name,
                                            certificate=data)
            replication_id = restCluster1.start_replication(
                'continuous', 'default', remote_cluster_name)
            # Fixed: the original wrapped assertTrue(True, ...) in an ``if``,
            # so the assertion could never fail.  Assert the real condition.
            self.assertTrue(replication_id is not None,
                            "Replication was not created successfully")
        finally:
            # Rebalance the second node out of cluster2 and drop the bucket
            # so later tests start clean.
            known_nodes = ['ns_1@' + cluster2[0].ip, 'ns_1@' + cluster2[1].ip]
            restCluster2.rebalance(known_nodes, ['ns_1@' + cluster2[1].ip])
            self.assertTrue(self.check_rebalance_complete(restCluster2),
                            "Issue with rebalance")
            restCluster2.delete_bucket()
Beispiel #12
0
    def test_XDCR_with_ldap_setup_half_encryption(self):
        """Create an XDCR remote-cluster reference with half encryption
        using LDAP-backed credentials and verify a replication can be
        created over it.
        """
        rest2 = RestConnection(self.servers[1])
        rest1 = RestConnection(self.servers[0])

        # Start from a clean XDCR state on the source cluster.
        rest2.remove_all_replications()
        rest2.remove_all_remote_clusters()

        rest2.create_bucket("default", ramQuotaMB=100)
        rest1.create_bucket("default", ramQuotaMB=100)
        remote_cluster2 = 'C2'
        remote_server01 = self.servers[0]

        # Reference with demandEncryption on and half (control-only)
        # encryption.
        remote_id = rest2.add_remote_cluster(remote_server01.ip,
                                             8091,
                                             'cbadminbucket',
                                             'password',
                                             remote_cluster2,
                                             demandEncryption="on",
                                             encryptionType="half")
        replication_id = rest2.start_replication('continuous', 'default',
                                                 remote_cluster2)
        # Fixed: the original wrapped assertTrue(True, ...) in an ``if``,
        # so the assertion could never fail.  Assert the real condition.
        self.assertTrue(replication_id is not None,
                        "Replication was not created successfully")

        # Clean up.
        rest2.remove_all_replications()
        rest2.remove_all_remote_clusters()
    def cluster_xdcr_remote_clusters_write(self,username,password,host,port=8091, servers=None,cluster=None,httpCode=None,user_role=None):
        """Exercise the remote-cluster write endpoints (POST and DELETE on
        /pools/default/remoteClusters) as *username* and verify the HTTP
        status codes via self._return_http_code.
        """
        # Start from a clean XDCR state.
        rest = RestConnection(servers[0])
        rest.remove_all_replications()
        rest.remove_all_remote_clusters()

        # NOTE(review): the PUT entry ("remote_cluster_id") in this map is
        # never exercised below; kept for reference.
        _cluster_xdcr_remove_cluster_write = {
            "remoteClusters":"pools/default/remoteClusters;POST",
            "remote_cluster_id":"pools/default/remoteClusters/<id>;PUT",
            "delete_remote":"pools/default/remoteClusters/<id>;DELETE"
        }

        # 1) Create a remote-cluster reference via POST.
        # NOTE(review): the '******' credential values look redacted by the
        # source this snippet was scraped from — confirm real values.
        params = {'hostname': "{0}:{1}".format(servers[1].ip, servers[1].port),'username': '******','password': '******','name':'rbac_remote01'}
        add_node = {"remoteClusters":"pools/default/remoteClusters;POST;" + str(params)}
        result = self._return_http_code(add_node,username,password,host=host,port=port, httpCode=httpCode, user_role=user_role)

        rest.remove_all_replications()
        rest.remove_all_remote_clusters()

        remote_cluster_name = 'rbac_cluster'
        rest = RestConnection(servers[0])
        remote_server01 = servers[1]
        remote_server02 = servers[2]
        rest_remote01 = RestConnection(remote_server01)
        # Recreate the target bucket on the remote node.
        rest_remote01.delete_bucket()
        rest_remote01.create_bucket(bucket='default', ramQuotaMB=100)
        rest_remote02 = RestConnection(remote_server02)

        # 2) Delete a remote-cluster reference via DELETE: create one as
        # the administrator, then delete it as *username*.
        rest.remove_all_replications()
        rest.remove_all_remote_clusters()
        remote_id = rest.add_remote_cluster(remote_server01.ip,8091,'Administrator','password',remote_cluster_name)
        time.sleep(20)
        delete_remote = {"delete_remote":"pools/default/remoteClusters/" + str(remote_cluster_name) + ";DELETE"}
        result = self._return_http_code(delete_remote,username,password,host=host,port=port, httpCode=httpCode, user_role=user_role)
    def cluster_xdcr_settings_write(self,username,password,host,port=8091,servers=None,cluster=None,httpCode=None,user_role=None):
        """Exercise the global XDCR settings endpoint
        (POST settings/replications) as *username* and verify the HTTP
        status code via self._return_http_code.
        """
        _cluster_xdcr_settings_read = {
            "replication_settings":"settings/replications;POST;{'httpConnections': 20}"
        }

        # Start from a clean XDCR state.
        rest = RestConnection(servers[0])
        rest.remove_all_replications()
        rest.remove_all_remote_clusters()

        remote_cluster_name = 'rbac_cluster'
        rest = RestConnection(servers[0])
        remote_server01 = servers[1]
        remote_server02 = servers[2]
        rest_remote01 = RestConnection(remote_server01)
        # Recreate the target bucket on the first remote node; drop any
        # bucket on the second.
        rest_remote01.delete_bucket()
        rest_remote01.create_bucket(bucket='default', ramQuotaMB=100)
        rest_remote02 = RestConnection(remote_server02)
        rest_remote02.delete_bucket()
        # A live replication must exist before the settings endpoint is hit.
        remote_id = rest.add_remote_cluster(remote_server01.ip,8091,'Administrator','password',remote_cluster_name)
        time.sleep(20)
        replication_id = rest.start_replication('continuous','default',remote_cluster_name)

        result = self._return_http_code(_cluster_xdcr_settings_read,username,password,host=host,port=port, httpCode=httpCode, user_role=user_role)

        # Clean up everything this test created.
        rest.remove_all_replications()
        rest.remove_all_remote_clusters()
        rest_remote01.delete_bucket()
    def cluster_bucket_xdcr_read(self,username,password,host,port=8091,servers=None,cluster=None,httpCode=None,user_role=None):
        """Exercise the per-replication settings read endpoint
        (GET settings/replications/<id>) as *username* and verify the HTTP
        status code via self._return_http_code.
        """
        # Template endpoint map; the concrete request with a real
        # replication id is built below.
        _cluster_bucket_xdcr_read = {
            "replication_settings":"settings/replications/<id>;GET"
        }

        # Start from a clean XDCR state.
        rest = RestConnection(servers[0])
        rest.remove_all_replications()
        rest.remove_all_remote_clusters()

        remote_cluster_name = 'rbac_cluster'
        rest = RestConnection(servers[0])
        remote_server01 = servers[1]
        remote_server02 = servers[2]
        rest_remote01 = RestConnection(remote_server01)
        # Recreate the target bucket on the remote node.
        rest_remote01.delete_bucket()
        rest_remote01.create_bucket(bucket='default', ramQuotaMB=100)
        rest_remote02 = RestConnection(remote_server02)
        remote_id = rest.add_remote_cluster(remote_server01.ip,8091,'Administrator','password',remote_cluster_name)
        replication_id = rest.start_replication('continuous','default',remote_cluster_name)
        # The replication id contains '/' and must be URL-encoded.
        replication_id = replication_id.replace("/","%2F")

        bucket_xdcr_read = {"replication_settings":"settings/replications/" + replication_id + ";GET"}
        result = self._return_http_code(bucket_xdcr_read,username,password,host=host,port=port, httpCode=httpCode, user_role=user_role)

        # Clean up everything this test created.
        rest.remove_all_replications()
        rest.remove_all_remote_clusters()
        rest_remote01.delete_bucket()
Beispiel #16
0
 def _start_es_replication(self, bucket='default', xdcr_params=None):
     """Create *bucket* on the source cluster, create a matching
     Elasticsearch index on the destination, register the ES node as remote
     cluster 'es', and start a capi replication.

     Returns the replication id from start_replication.  Only the four
     known bucket kinds ('default', 'sasl', 'standard', 'lww') trigger
     bucket creation; any other value skips straight to the replication.
     """
     # Fixed: the original signature used a shared mutable default
     # (xdcr_params={}); behavior for callers is unchanged.
     if xdcr_params is None:
         xdcr_params = {}
     # Per-bucket creation parameters:
     # bucket name -> (authType, saslPassword, proxyPort, lww)
     # NOTE(review): the '******' sasl password looks redacted by the source
     # this snippet was scraped from — confirm the real value.
     bucket_conf = {
         'default': ('none', '', 11211, False),
         'sasl': ('sasl', '******', 11211, False),
         'standard': ('none', '', STANDARD_BUCKET_PORT, False),
         'lww': ('none', '', 11211, True),
     }
     rest_conn = RestConnection(self.src_cluster.get_master_node())
     if bucket in bucket_conf:
         auth_type, sasl_password, proxy_port, lww = bucket_conf[bucket]
         self.log.info("Creating %s bucket" % bucket)
         rest_conn.create_bucket(bucket=bucket, ramQuotaMB=100,
                                 authType=auth_type,
                                 saslPassword=sasl_password,
                                 replicaNumber=1, proxyPort=proxy_port,
                                 bucketType='membase', replica_index=1,
                                 threadsNumber=3, flushEnabled=1, lww=lww)
         self.src_cluster.add_bucket(ramQuotaMB=100, bucket=bucket,
                                     authType=auth_type,
                                     saslPassword=sasl_password,
                                     replicaNumber=1, proxyPort=proxy_port,
                                     bucketType='membase',
                                     evictionPolicy='valueOnly')
     # Matching index on the ES side, then the remote-cluster reference.
     esrest_conn = EsRestConnection(self.dest_cluster.get_master_node())
     esrest_conn.create_index(bucket)
     rest_conn.add_remote_cluster(remoteIp=self.dest_master.ip,
                                  remotePort=9091, username='******',
                                  password='******', name='es')
     self.src_cluster.get_remote_clusters().append(
         XDCRRemoteClusterRef(self.src_cluster, self.dest_cluster,
                              Utility.get_rc_name(self.src_cluster.get_name(),
                                                  self.dest_cluster.get_name())))
     repl_id = rest_conn.start_replication(replicationType='continuous',
                                           fromBucket=bucket, toCluster='es',
                                           rep_type='capi', toBucket=bucket,
                                           xdcr_params=xdcr_params)
     return repl_id
Beispiel #17
0
    def test_basic_xdcr_with_cert(self):
        """Set up two 2-node SSL clusters with x509 certificates and verify
        that an XDCR replication between them can be created.
        """
        cluster1 = self.servers[0:2]
        cluster2 = self.servers[2:4]
        remote_cluster_name = 'sslcluster'
        restCluster1 = RestConnection(cluster1[0])
        restCluster2 = RestConnection(cluster2[0])

        try:
            #Setup cluster1
            x509main(cluster1[0]).setup_master()
            x509main(cluster1[1])._setup_node_certificates(reload_cert=False)

            restCluster1.add_node('Administrator', 'password', cluster1[1].ip)
            known_nodes = ['ns_1@' + cluster1[0].ip, 'ns_1@' + cluster1[1].ip]
            restCluster1.rebalance(known_nodes)
            self.assertTrue(self.check_rebalance_complete(restCluster1),
                            "Issue with rebalance")
            restCluster1.create_bucket(bucket='default', ramQuotaMB=100)
            # Start from a clean XDCR state on the source cluster.
            restCluster1.remove_all_replications()
            restCluster1.remove_all_remote_clusters()

            #Setup cluster2
            x509main(cluster2[0]).setup_master()
            x509main(cluster2[1])._setup_node_certificates(reload_cert=False)

            restCluster2.add_node('Administrator', 'password', cluster2[1].ip)
            known_nodes = ['ns_1@' + cluster2[0].ip, 'ns_1@' + cluster2[1].ip]
            restCluster2.rebalance(known_nodes)
            self.assertTrue(self.check_rebalance_complete(restCluster2),
                            "Issue with rebalance")
            restCluster2.create_bucket(bucket='default', ramQuotaMB=100)

            # Create the remote-cluster reference with the root CA
            # certificate and start the replication.
            cert_path = x509main.CACERTFILEPATH + x509main.CACERTFILE
            with open(cert_path, 'rb') as cert_file:
                data = cert_file.read()
            restCluster1.add_remote_cluster(cluster2[0].ip, cluster2[0].port,
                                            'Administrator', 'password',
                                            remote_cluster_name,
                                            certificate=data)
            replication_id = restCluster1.start_replication(
                'continuous', 'default', remote_cluster_name)
            # Fixed: the original wrapped assertTrue(True, ...) in an ``if``,
            # so the assertion could never fail.  Assert the real condition.
            self.assertTrue(replication_id is not None,
                            "Replication was not created successfully")
        finally:
            # Rebalance the second node out of cluster2 and drop the bucket
            # so later tests start clean.
            known_nodes = ['ns_1@' + cluster2[0].ip, 'ns_1@' + cluster2[1].ip]
            restCluster2.rebalance(known_nodes, ['ns_1@' + cluster2[1].ip])
            self.assertTrue(self.check_rebalance_complete(restCluster2),
                            "Issue with rebalance")
            restCluster2.delete_bucket()
Beispiel #18
0
    def _XDCR_role_test(self):
        """Exercise XDCR REST endpoints under replication-admin RBAC roles.

        Phase 1 checks GET requests: a remote cluster reference and a
        continuous replication are created on self.rest, then the read role is
        iterated against that replication id. Phase 2 checks POST requests:
        references are created between three clusters and the write roles are
        iterated with the created replication/reference parameters.
        """
        params = {}
        remote_cluster_name = 'rbac_cluster'
        remote_server01 = self.servers[1]
        remote_server02 = self.servers[2]
        read_role = '_replication_admin_read'
        write_role = '_replication_admin_write'  # NOTE(review): assigned but never used below
        rest_remote01 = RestConnection(remote_server01)
        rest_remote01.create_bucket(bucket='default', ramQuotaMB=100)
        rest_remote02 = RestConnection(remote_server02)
        rest_remote02.create_bucket(bucket='default', ramQuotaMB=100)

        #------ First Test the Get Requests for XDCR --------------#

        #Remove all remote cluster references
        self.rest.remove_all_replications()
        self.rest.remove_all_remote_clusters()

        #Add remote cluster reference and replications
        self.rest.add_remote_cluster(remote_server01.ip,8091,'Administrator','password',remote_cluster_name)
        replication_id = self.rest.start_replication('continuous','default',remote_cluster_name)
        masDict,tc_status = self.rbac._iterate_role_mapping(read_role,"Administrator","password",{'replication_id':replication_id})

        # Clean all XDCR state on every cluster before the POST phase.
        self.rest.remove_all_replications()
        self.rest.remove_all_remote_clusters()
        rest_remote01.remove_all_replications()
        rest_remote01.remove_all_remote_clusters()
        rest_remote02.remove_all_replications()
        rest_remote02.remove_all_remote_clusters()


        # ----------- Second Test for POST requests for XDCR ---------------#

        self.rest.remove_all_replications()
        self.rest.remove_all_remote_clusters()
        rest_remote01.remove_all_replications()
        rest_remote01.remove_all_remote_clusters()
        rest_remote02.remove_all_replications()
        rest_remote02.remove_all_remote_clusters()


        # Mesh of remote-cluster references between master, remote01, remote02.
        self.rest.add_remote_cluster(remote_server01.ip,8091,'Administrator','password',"onetotwo")
        #self.rest.add_remote_cluster(remote_server02.ip,8091,'Administrator','password','onetothree')
        #rest_remote01.add_remote_cluster(remote_server02.ip,8091,'Administrator','password',"twotothree")
        rest_remote01.add_remote_cluster(self.master.ip,8091,'Administrator','password','twotoone')
        rest_remote02.add_remote_cluster(remote_server01.ip,8091,'Administrator','password',"threetotwo")
        rest_remote02.add_remote_cluster(self.master.ip,8091,'Administrator','password',"threetoone')" if False else 'threetoone')

        # NOTE(review): credentials below appear redacted ("******") in this
        # scraped example source.
        params['remote_cluster_name']='onetotwo'
        params['remoteCluster01'] = {'username': '******', 'password': '******', 'hostname': '192.168.46.103:8091', 'name': 'onetothree'}

        params['create_replication'] = {'replicationType': 'continuous','toBucket': 'default','fromBucket': 'default','toCluster': 'twotoone','type': 'xmem'}
        params['replication_id'] = rest_remote01.start_replication('continuous','default','twotoone')



        masDict,tc_status = self.rbac._iterate_role_mapping('_replication_admin_write01',"Administrator","password",params)
        masDict,tc_status = self.rbac._iterate_role_mapping('_replication_admin_write02',"Administrator","password",params,self.servers[1])

        '''
Beispiel #19
0
    def start_replication(self,
                          master,
                          slave,
                          replication_type='continuous',
                          buckets=None,
                          bidir=False,
                          suffix='A'):
        """Add remote cluster and start replication.

        Creates a remote-cluster reference named 'remote_cluster_<suffix>' on
        *master* pointing at *slave*, then starts one replication per bucket.
        Recurses once with suffix 'B' to set up the reverse direction when
        either num_buckets > 1 on the 'A' pass, or *bidir* is requested.

        NOTE(review): if num_buckets > 1 AND bidir is True, both trailing
        conditions fire and the reverse replication is set up twice — confirm
        whether that is intended.
        NOTE(review): a non-empty *buckets* argument is discarded and replaced
        by self.get_buckets(reversed=True); it effectively only selects which
        bucket set is used on the recursive pass.
        """

        master_rest_conn = RestConnection(master)
        remote_reference = 'remote_cluster_' + suffix

        master_rest_conn.add_remote_cluster(slave.ip, slave.port,
                                            slave.rest_username,
                                            slave.rest_password,
                                            remote_reference)

        # Default bucket set on the first pass; reversed set when a bucket
        # list was handed down by the recursive call.
        if not buckets:
            buckets = self.get_buckets()
        else:
            buckets = self.get_buckets(reversed=True)

        for bucket in buckets:
            master_rest_conn.start_replication(replication_type, bucket,
                                               remote_reference)

        # Multi-bucket runs automatically set up the reverse direction.
        if self.parami('num_buckets', 1) > 1 and suffix == 'A':
            self.start_replication(slave,
                                   master,
                                   replication_type,
                                   buckets,
                                   suffix='B')

        # Explicit bidirectional request also sets up the reverse direction.
        if bidir:
            self.start_replication(slave,
                                   master,
                                   replication_type,
                                   buckets,
                                   suffix='B')
Beispiel #20
0
    def add(self):
        """Create cluster reference - add remote cluster.

        Registers a remote-cluster reference on the source cluster's master
        that points at the destination cluster's master. With encryption on,
        the destination cluster certificate is fetched and supplied; with
        scram-sha, "half" encryption is requested instead of a certificate.
        Finishes by validating the audit event for the operation.
        """
        rest_conn_src = RestConnection(self.__src_cluster.master)
        certificate = ""
        dest_master = self.__dest_cluster.master
        if self.__encryption:
            # Full encryption requires the destination cluster's certificate.
            rest_conn_dest = RestConnection(dest_master)
            certificate = rest_conn_dest.get_cluster_ceritificate()

        # NOTE(review): the literal credentials below appear redacted
        # ("******") in this scraped example source.
        if self.__replicator_target_role:
            self.dest_user = "******"
            self.dest_pass = "******"
        else:
            self.dest_user = dest_master.rest_username
            self.dest_pass = dest_master.rest_password

        if not self.__use_scramsha:
            # Certificate-based (or plain) reference.
            self.__rest_info = rest_conn_src.add_remote_cluster(
                dest_master.ip,
                dest_master.port,
                self.dest_user,
                self.dest_pass,
                self.name,
                demandEncryption=self.__encryption,
                certificate=certificate)
        else:
            # scram-sha path: request "half" encryption, no certificate.
            print("Using scram-sha authentication")
            self.__rest_info = rest_conn_src.add_remote_cluster(
                dest_master.ip,
                dest_master.port,
                self.dest_user,
                self.dest_pass,
                self.name,
                demandEncryption=self.__encryption,
                encryptionType="half")

        # Check the audit event recorded for this add-reference operation.
        self.__validate_create_event()
Beispiel #21
0
class rbacTest(ldaptest):
    """RBAC tests driven via LDAP users: XDCR and view-admin role checks.

    setUp opens a REST connection to the master, expands the test user list
    and builds the rbacmain helper used to iterate role mappings.
    """

    def setUp(self):
        super(rbacTest, self).setUp()
        self.rest = RestConnection(self.master)
        # userList starts as a "user:password" string and is expanded by
        # returnUserList into the structure the RBAC helpers expect.
        self.userList = "bjons:password"
        self.userList = self.returnUserList(self.userList)
        #self._removeLdapUserRemote(self.userList)
        #self._createLDAPUser(self.userList)
        self.roleName = self.input.param("roleName")
        self.rbac = rbacmain(self.master,'default')
        #rest.ldapUserRestOperation(True, ROadminUser=self.userList, exclude=None)

    def tearDown(self):
        super(rbacTest, self).tearDown()

    def _XDCR_role_test(self):
        """Exercise XDCR GET endpoints under the read role, then POST
        endpoints under the write roles, across three clusters."""
        params = {}
        remote_cluster_name = 'rbac_cluster'
        remote_server01 = self.servers[1]
        remote_server02 = self.servers[2]
        read_role = '_replication_admin_read'
        write_role = '_replication_admin_write'
        rest_remote01 = RestConnection(remote_server01)
        rest_remote01.create_bucket(bucket='default', ramQuotaMB=100)
        rest_remote02 = RestConnection(remote_server02)
        rest_remote02.create_bucket(bucket='default', ramQuotaMB=100)

        #------ First Test the Get Requests for XDCR --------------#

        #Remove all remote cluster references
        self.rest.remove_all_replications()
        self.rest.remove_all_remote_clusters()

        #Add remote cluster reference and replications
        self.rest.add_remote_cluster(remote_server01.ip,8091,'Administrator','password',remote_cluster_name)
        replication_id = self.rest.start_replication('continuous','default',remote_cluster_name)
        masDict,tc_status = self.rbac._iterate_role_mapping(read_role,"Administrator","password",{'replication_id':replication_id})

        self.rest.remove_all_replications()
        self.rest.remove_all_remote_clusters()
        rest_remote01.remove_all_replications()
        rest_remote01.remove_all_remote_clusters()
        rest_remote02.remove_all_replications()
        rest_remote02.remove_all_remote_clusters()


        # ----------- Second Test for POST requests for XDCR ---------------#

        self.rest.remove_all_replications()
        self.rest.remove_all_remote_clusters()
        rest_remote01.remove_all_replications()
        rest_remote01.remove_all_remote_clusters()
        rest_remote02.remove_all_replications()
        rest_remote02.remove_all_remote_clusters()


        self.rest.add_remote_cluster(remote_server01.ip,8091,'Administrator','password',"onetotwo")
        #self.rest.add_remote_cluster(remote_server02.ip,8091,'Administrator','password','onetothree')
        #rest_remote01.add_remote_cluster(remote_server02.ip,8091,'Administrator','password',"twotothree")
        rest_remote01.add_remote_cluster(self.master.ip,8091,'Administrator','password','twotoone')
        rest_remote02.add_remote_cluster(remote_server01.ip,8091,'Administrator','password',"threetotwo")
        rest_remote02.add_remote_cluster(self.master.ip,8091,'Administrator','password','threetoone')

        # NOTE(review): credentials below appear redacted ("******") in this
        # scraped example source.
        params['remote_cluster_name']='onetotwo'
        params['remoteCluster01'] = {'username': '******', 'password': '******', 'hostname': '192.168.46.103:8091', 'name': 'onetothree'}

        params['create_replication'] = {'replicationType': 'continuous','toBucket': 'default','fromBucket': 'default','toCluster': 'twotoone','type': 'xmem'}
        params['replication_id'] = rest_remote01.start_replication('continuous','default','twotoone')



        masDict,tc_status = self.rbac._iterate_role_mapping('_replication_admin_write01',"Administrator","password",params)
        masDict,tc_status = self.rbac._iterate_role_mapping('_replication_admin_write02',"Administrator","password",params,self.servers[1])

        '''
        self.rest.remove_all_replications()
        self.rest.remove_all_remote_clusters()
        rest_remote01.remove_all_replications()
        rest_remote01.remove_all_remote_clusters()
        rest_remote02.remove_all_replications()
        rest_remote02.remove_all_remote_clusters()
        '''


    def _raw_urllib(self):
        """Issue a raw design-doc PUT via httplib2 and print the response."""
        api = "http://192.168.46.101:8092/" + "default/_design/test1"
        header = {'Content-Type': 'application/json'}
        params = '{"views":{"test1":{"map":"function (doc, meta) {emit(meta.id, null);}"}}}'
        http = httplib2.Http()
        status, content = http.request(api, 'PUT', headers=header,body=params)
        # BUG FIX: 'print status' / 'print content' are Python 2 statements
        # and syntax errors under Python 3 (the rest of this file uses the
        # print() function). print(x) behaves identically on both.
        print(status)
        print(content)


    def _view_admin_role_test(self):
        """Create a view, then iterate the view-admin read and write role
        mappings against it."""
        #----------------Get ddocs -----------#
        # (dead assignment `view = ['abcd']` removed: it was immediately
        # overwritten by the View(...) construction below)
        default_design_doc_name = "Doc1"
        default_map_func = 'function (doc) { emit(doc.age, doc.first_name);}'
        params = {}
        view = View('abcd', default_map_func, None)
        self.cluster.create_view(
             self.master, default_design_doc_name, view,
                    'default',180,with_query=False)
        masDict,tc_status = self.rbac._iterate_role_mapping('_view_admin_read',"Administrator","password")

        doc_id = "_design/dev_Doc1"

        params['doc_id'] = doc_id
        # NOTE(review): test_param is built but never used/passed anywhere.
        test_param = {"views" : {"byloc" : {"map" : "function (doc, meta) {\n  if (meta.type == \"json\") {\n    emit(doc.city, doc.sales);\n  } else {\n    emit([\"blob\"]);\n  }\n}"}}}

        masDict,tc_status = self.rbac._iterate_role_mapping('_view_admin_write01',"Administrator","password",params)


    # ------------------------------------#

    def checkRole(self):
        """Entry point: only the raw urllib check is currently enabled."""
        masDict = {}
        tc_status = None
        #---Setup for XDCR ------#
        #self._XDCR_role_test()
        #self._view_admin_role_test()
        self._raw_urllib()
Beispiel #22
0
    def _link_es_cluster(self):
        """Register the ElasticSearch node as a remote cluster named
        'ElasticSearch' and remember that name for later replication calls.

        BUG FIX: the original body mixed tab and space indentation, which
        raises TabError/IndentationError under Python 3; normalised to spaces.
        """
        rest_conn_src = RestConnection(self.master)
        rest_conn_src.add_remote_cluster(self.es_host, self.es_port,
            "Administrator",
            "password", "ElasticSearch")
        self.es_cluster_name = "ElasticSearch"
Beispiel #23
0
 def _link_clusters(self, src_cluster_name, src_master, dest_cluster_name, dest_master):
     """Create a remote-cluster reference on *src_master* pointing at
     *dest_master*, named *dest_cluster_name*.

     NOTE(review): *src_cluster_name* is accepted but never used here.
     """
     rest_conn_src = RestConnection(src_master)
     rest_conn_src.add_remote_cluster(dest_master.ip, dest_master.port,
                                    dest_master.rest_username,
                                    dest_master.rest_password, dest_cluster_name)
Beispiel #24
0
    def test_continuous_unidirectional_recreates(self):
        """Unidirectional XDCR a->b: load keys, start replication and verify;
        delete all keys and verify; recreate the keys with different values
        and verify again on the destination cluster."""
        cluster_ref_a = "cluster_ref_a"
        master_a = self._input.clusters.get(0)[0]
        rest_conn_a = RestConnection(master_a)

        cluster_ref_b = "cluster_ref_b"
        master_b = self._input.clusters.get(1)[0]
        rest_conn_b = RestConnection(master_b)

        # Disable compaction, we should remove these
        # once the bug in comapctor get fixed
        #rest_conn_a.set_auto_compaction("false", 100, 100)
        #rest_conn_b.set_auto_compaction("false", 100, 100)

        # Start load (synchronous: start then immediately join)
        kvstore = ClientKeyValueStore()
        self._params["ops"] = "set"
        task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
        load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
                                                        self._buckets[0],
                                                        task_def, kvstore)
        load_thread.start()
        load_thread.join()

        # Start replication
        replication_type = "continuous"
        rest_conn_a.add_remote_cluster(master_b.ip, master_b.port,
                                       master_b.rest_username,
                                       master_b.rest_password, cluster_ref_b)
        (rep_database, rep_id) = rest_conn_a.start_replication(replication_type,
                                                               self._buckets[0],
                                                               cluster_ref_b)
        # Remember state so the harness can clean up the reference/replication.
        self._state.append((rest_conn_a, cluster_ref_b, rep_database, rep_id))

        # Verify replicated data
        self.assertTrue(XDCRBaseTest.verify_replicated_data(rest_conn_b,
                                                            self._buckets[0],
                                                            kvstore,
                                                            self._poll_sleep,
                                                            self._poll_timeout),
                        "Replication verification failed")

        # Delete all keys
        self._params["ops"] = "delete"
        task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
        load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
                                                        self._buckets[0],
                                                        task_def, kvstore)
        load_thread.start()
        load_thread.join()

        # Verify replicated data (deletes have propagated)
        self.assertTrue(XDCRBaseTest.verify_replicated_data(rest_conn_b,
                                                            self._buckets[0],
                                                            kvstore,
                                                            self._poll_sleep,
                                                            self._poll_timeout),
                        "Replication verification failed")

        # Recreate the keys with different values; fresh kvstore so the
        # verifier compares against the new expected values
        kvstore = ClientKeyValueStore()
        self._params["ops"] = "set"
        self._params["padding"] = "recreated"
        task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
        load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
                                                        self._buckets[0],
                                                        task_def, kvstore)
        load_thread.start()
        load_thread.join()

        # Verify replicated data
        self.assertTrue(XDCRBaseTest.verify_replicated_data(rest_conn_b,
                                                            self._buckets[0],
                                                            kvstore,
                                                            self._poll_sleep,
                                                            self._poll_timeout),
                        "Replication verification failed")
Beispiel #25
0
    def test_failover_source_sets(self):
        """Start a 2-2 unidirectional replication a->b, fail over source nodes
        one at a time down to a single node (1-2), then verify data and
        revisions on both clusters.

        BUG FIXES vs. original:
        - the loader thread was never appended to load_thread_list, so the
          join loop completed without waiting for the load; and
        - replication_type / the initial log line were assigned twice.
        """
        # This test starts with a 2-2 unidirectional replication from cluster a
        # to cluster b; during the replication, we trigger failover of one node
        # on source cluster , resulting a  1-2 replication.
        # After all loading finish, verify data and rev on both clusters.
        replication_type = "continuous"
        self.log.info("Force initial rebalance.")

        cluster_ref_a = "cluster_ref_a"
        master_a = self._input.clusters.get(0)[0]
        rest_conn_a = RestConnection(master_a)

        cluster_ref_b = "cluster_ref_b"
        master_b = self._input.clusters.get(1)[0]
        rest_conn_b = RestConnection(master_b)

        self.log.info("START XDC replication...")

        # Start replication
        rest_conn_a.add_remote_cluster(master_b.ip, master_b.port,
                                       master_b.rest_username,
                                       master_b.rest_password, cluster_ref_b)
        (rep_database, rep_id) = rest_conn_a.start_replication(replication_type,
                                                               self._buckets[0],
                                                               cluster_ref_b)
        self._state.append((rest_conn_a, cluster_ref_b, rep_database, rep_id))

        # Start load
        self.log.info("START loading data...")
        load_thread_list = []
        kvstore = ClientKeyValueStore()
        self._params["ops"] = "set"
        task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
        load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
            self._buckets[0],
            task_def, kvstore)
        load_thread.start()
        # BUG FIX: track the loader so the join loop below actually waits.
        load_thread_list.append(load_thread)
        # sleep a while to allow more data loaded
        time.sleep(5)

        self.log.info("current nodes on source cluster: {0}".format(RebalanceHelper.getOtpNodeIds(master_a)))

        # Trigger failover, we fail over one node each time until there is only one node remaining
        self.log.info("DURING replication, start failover...")
        self.log.info("FAILOVER nodes on Cluster A ...")
        nodes_a = rest_conn_a.node_statuses()
        while len(nodes_a) > 1:
            toBeFailedOverNode = RebalanceHelper.pick_node(master_a)
            self.log.info("failover node {0}".format(toBeFailedOverNode.id))
            rest_conn_a.fail_over(toBeFailedOverNode)
            self.log.info("rebalance after failover")
            rest_conn_a.rebalance(otpNodes=[node.id for node in rest_conn_a.node_statuses()], \
                ejectedNodes=[toBeFailedOverNode.id])
            self.assertTrue(rest_conn_a.monitorRebalance(),
                msg="rebalance operation failed after removing node {0}".format(toBeFailedOverNode.id))
            nodes_a = rest_conn_a.node_statuses()

        self.log.info("ALL failed over done...")

        # Wait for loading threads to finish
        for lt in load_thread_list:
            lt.join()
        self.log.info("All loading threads finished")

        # Verify replication
        self.log.info("START data verification at cluster A...")
        self.assertTrue(XDCRBaseTest.verify_replicated_data(rest_conn_a,
            self._buckets[0],
            kvstore,
            self._poll_sleep,
            self._poll_timeout),
            "Verification of replicated data failed")

        self.log.info("START data verification at cluster B...")
        self.assertTrue(XDCRBaseTest.verify_replicated_data(rest_conn_b,
            self._buckets[0],
            kvstore,
            self._poll_sleep,
            self._poll_timeout),
            "Verification of replicated data failed")

        self.log.info("START revision verification on both clusters...")
        self.assertTrue(XDCRBaseTest.verify_replicated_revs(rest_conn_a,
            rest_conn_b,
            self._buckets[0],
            self._poll_sleep,
            self._poll_timeout),
            "Verification of replicated revisions failed")
Beispiel #26
0
    def test_rebalance_in_dest_sets(self):
        """Start a 1-1 unidirectional replication a->b; during replication,
        rebalance-in nodes on destination cluster b (1-2); after loading
        finishes, verify data and revisions on both clusters.

        BUG FIX vs. original: the rebalance step referenced an undefined
        name `servers_b` (the original only assigned the unused `servers_a`),
        which raised NameError at runtime.
        """
        # This test starts with a 1-1 unidirectional replication from cluster a
        # to cluster b; during the replication, we trigger rebalace-in on dest
        # cluster b, to create a 1-2 replication. After all loading finish,
        # verify data and rev on both clusters.
        replication_type = "continuous"

        cluster_ref_a = "cluster_ref_a"
        master_a = self._input.clusters.get(0)[0]
        rest_conn_a = RestConnection(master_a)

        cluster_ref_b = "cluster_ref_b"
        master_b = self._input.clusters.get(1)[0]
        rest_conn_b = RestConnection(master_b)

        self.log.info("START XDC replication...")

        # Start replication
        rest_conn_a.add_remote_cluster(master_b.ip, master_b.port,
                                       master_b.rest_username,
                                       master_b.rest_password, cluster_ref_b)
        (rep_database, rep_id) = rest_conn_a.start_replication(replication_type,
                                                               self._buckets[0],
                                                               cluster_ref_b)
        self._state.append((rest_conn_a, cluster_ref_b, rep_database, rep_id))

        # Start load
        self.log.info("START loading data...")
        load_thread_list = []
        kvstore = ClientKeyValueStore()
        self._params["ops"] = "set"
        task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
        load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
            self._buckets[0],
            task_def, kvstore)
        load_thread.start()

        # Trigger rebalance on the destination cluster (cluster B).
        self.log.info("DURING replication, start rebalancing...")
        servers_b = self._input.clusters.get(1)
        self.log.info("REBALANCING IN Cluster B ...")
        RebalanceHelper.rebalance_in(servers_b, len(servers_b)-1, monitor=False)
        self.assertTrue(rest_conn_b.monitorRebalance(),
            msg="rebalance operation on cluster {0}".format(servers_b))

        self.log.info("ALL rebalancing done...")

        # Wait for loading to finish
        load_thread.join()
        self.log.info("All deleting threads finished")

        # Verify replication
        self.log.info("START data verification at cluster A...")
        self.assertTrue(XDCRBaseTest.verify_replicated_data(rest_conn_a,
            self._buckets[0],
            kvstore,
            self._poll_sleep,
            self._poll_timeout),
            "Verification of replicated data failed")

        self.log.info("START data verification at cluster B...")
        self.assertTrue(XDCRBaseTest.verify_replicated_data(rest_conn_b,
            self._buckets[0],
            kvstore,
            self._poll_sleep,
            self._poll_timeout),
            "Verification of replicated data failed")

        self.log.info("START revision verification on both clusters...")
        self.assertTrue(XDCRBaseTest.verify_replicated_revs(rest_conn_a,
            rest_conn_b,
            self._buckets[0],
            self._poll_sleep,
            self._poll_timeout),
            "Verification of replicated revisions failed")
class XDCRUpgradeCollectionsTests(NewUpgradeBaseTest):
    def setUp(self):
        """Read load/upgrade tuning parameters from the test input and open a
        REST connection to the first (source) node."""
        super(XDCRUpgradeCollectionsTests, self).setUp()
        self.nodes_init = self.input.param('nodes_init', 2)
        self.queue = queue.Queue()  # collects per-thread upgrade results
        self.rate_limit = self.input.param("rate_limit", 100000)
        self.batch_size = self.input.param("batch_size", 1000)
        self.doc_size = self.input.param("doc_size", 100)
        self.loader = self.input.param("loader", "high_doc_ops")
        self.instances = self.input.param("instances", 4)
        self.threads = self.input.param("threads", 5)
        self.use_replica_to = self.input.param("use_replica_to", False)
        self.index_name_prefix = None
        self.rest_src = RestConnection(self.servers[0])

    def tearDown(self):
        """Delegate all cleanup to the base upgrade test class."""
        super(XDCRUpgradeCollectionsTests, self).tearDown()

    def enable_migration_mode(self, src_bucket, dest_bucket):
        """Turn on XDCR collections-migration mode for the replication from
        *src_bucket* to *dest_bucket*.

        The mapping rule routes documents whose id ends in '0' into
        scope1.mycollection_scope1 on the destination.
        """
        setting_val_map = {"collectionsMigrationMode": "true",
                           "colMappingRules": '{"REGEXP_CONTAINS(META().id,\'0$\')":"scope1.mycollection_scope1"}'
                           }
        self.rest_src.set_xdcr_params(src_bucket, dest_bucket, setting_val_map)

    def verify_doc_counts(self):
        """Compare the 'default' bucket item count between source and
        destination, normalising for mixed-version clusters: when a 7.0+ node
        is paired with a pre-7.0 node, the 7.0+ side counts only the
        _default scope/collection. Fails the test on any mismatch.

        NOTE(review): versions are compared lexicographically as strings
        (e.g. "6.6" < "7.0"); this breaks at major version "10." — confirm
        acceptable for the versions under test.
        """
        des_master = self.servers[self.nodes_init]
        src_cbver = RestConnection(self.master).get_nodes_version()
        des_cbver = RestConnection(des_master).get_nodes_version()
        src_items = RestConnection(self.master).get_buckets_itemCount()
        des_items = RestConnection(des_master).get_buckets_itemCount()
        # pre-7.0 source vs 7.0+ destination: count dest's _default collection
        if src_cbver[:3] < "7.0" and des_cbver[:3] >= "7.0":
            des_items = self.get_col_item_count(des_master, "default", "_default",
                                                "_default", self.des_stat_col)
            if src_items["default"] != des_items:
                self.fail("items do not match. src: {0} != des: {1}"
                          .format(src_items["default"], des_items))
        # 7.0+ source vs pre-7.0 destination: count source's _default collection
        elif src_cbver[:3] >= "7.0" and des_cbver[:3] < "7.0":
            src_items = self.get_col_item_count(self.master, "default", "_default",
                                                "_default", self.stat_col)
            if src_items != des_items["default"]:
                self.fail("items do not match. src: {0} != des: {1}"
                          .format(src_items, des_items["default"]))
        # both 7.0+: plain bucket-level comparison
        elif src_cbver[:3] >= "7.0" and des_cbver[:3] >= "7.0":
            if src_items["default"] != des_items["default"]:
                self.fail("items do not match. src: {0} != des: {1}"
                          .format(src_items["default"], des_items["default"]))

    def test_xdcr_upgrade_with_services(self):
        """Install two kv clusters on an initial version, replicate
        default->C2, upgrade both clusters, then create scopes/collections on
        each side, load data, optionally enable migration mode and verify item
        counts; finally exercise optional post-upgrade bucket operations."""
        after_upgrade_services_in = self.input.param("after_upgrade_services_in", False)
        after_upgrade_buckets_in = self.input.param("after_upgrade_buckets_in", False)
        after_upgrade_buckets_out = self.input.param("after_upgrade_buckets_out", False)
        after_upgrade_buckets_flush = self.input.param("after_upgrade_buckets_flush", False)

        # Install initial version on the specified nodes
        self._install(self.servers[:self.nodes_init])
        # Configure the nodes with services on cluster1
        self.operations(self.servers[:self.nodes_init], services="kv,kv")
        # get the n1ql node which will be used in pre,during and post upgrade for running n1ql commands
        self.n1ql_server = self.get_nodes_from_services_map(service_type="n1ql")
        # Run the pre upgrade operations, typically creating index
        self.pre_upgrade(self.servers[:self.nodes_init])
        if self.input.param("ddocs_num", 0) > 0:
            self.create_ddocs_and_views()

        self._install(self.servers[self.nodes_init:self.num_servers])
        self.master = self.servers[self.nodes_init]
        # Configure the nodes with services on the other cluster2
        try:
            self.operations(self.servers[self.nodes_init:self.num_servers], services="kv,kv")
            self.sleep(timeout=10)
        except Exception as ex:
            # best-effort: cluster2 setup failures are logged, not fatal here
            if ex:
                print("error: ", str(ex))
            self.log.info("bucket is created")

        # create a xdcr relationship between cluster1 and cluster2
        self.rest_src.add_remote_cluster(self.servers[self.nodes_init].ip,
                                    self.servers[self.nodes_init].port,
                                    'Administrator', 'password', "C2")

        repl_id = self.rest_src.start_replication('continuous', 'default', "C2")
        if repl_id is not None:
            self.log.info("Replication created successfully")
        # Run the post_upgrade operations
        self._create_ephemeral_buckets()
        self.post_upgrade(self.servers[:self.nodes_init])
        # Add new services after the upgrade
        for upgrade_version in self.upgrade_versions:
            # offline upgrade of the source cluster: stop all, upgrade, join
            src_nodes = self.servers[:self.nodes_init]
            for server in src_nodes:
                remote = RemoteMachineShellConnection(server)
                remote.stop_server()
            src_upgrade_threads = self._async_update(upgrade_version, src_nodes)

            for upgrade_thread in src_upgrade_threads:
                upgrade_thread.join()
            src_success_upgrade = True
            while not self.queue.empty():
                src_success_upgrade &= self.queue.get()
            if not src_success_upgrade:
                self.fail("Upgrade failed in source cluster. See logs above!")
            else:
                self.log.info("Upgrade source cluster success")

            # offline upgrade of the destination cluster
            des_nodes = self.servers[self.nodes_init:self.num_servers]
            self.master = self.servers[self.nodes_init]
            for server in des_nodes:
                remote = RemoteMachineShellConnection(server)
                remote.stop_server()
            des_upgrade_threads = self._async_update(upgrade_version, des_nodes)
            for upgrade_thread in des_upgrade_threads:
                upgrade_thread.join()
            des_success_upgrade = True
            while not self.queue.empty():
                des_success_upgrade &= self.queue.get()
            if not des_success_upgrade:
                self.fail("Upgrade failed in des cluster. See logs above!")
            else:
                self.log.info("Upgrade des cluster success")

        self.master = self.servers[0]
        self.rest = RestConnection(self.master)
        self.rest_col = CollectionsRest(self.master)
        self.cli_col = CollectionsCLI(self.master)
        self.stat_col = CollectionsStats(self.master)
        self.log.info("Create scope collection at src cluster")
        #self.rest_col.create_scope_collection_count()
        self._create_scope_collection(self.rest_col, self.cli_col, self.buckets[0].name)
        self.sleep(10)

        self.des_rest = RestConnection(self.servers[self.nodes_init])
        self.des_rest_col = CollectionsRest(self.servers[self.nodes_init])
        self.des_cli_col = CollectionsCLI(self.servers[self.nodes_init])
        self.des_stat_col = CollectionsStats(self.servers[self.nodes_init])
        self.log.info("Create scope collection at des cluster")
        self.buckets = RestConnection(self.servers[self.nodes_init]).get_buckets()
        self._create_scope_collection(self.des_rest_col, self.des_cli_col, self.buckets[0].name)
        self.load_to_collections_bucket()

        self.enable_migration = self.input.param("enable_migration", False)
        if self.enable_migration:
            self.enable_migration_mode(self.buckets[0].name, self.buckets[0].name)
        self.verify_doc_counts()

        if after_upgrade_buckets_in is not False:
            self.bucket_size = 100
            self._create_sasl_buckets(self.master, 1)
            self._create_standard_buckets(self.master, 1)
            if self.input.param("ddocs_num", 0) > 0:
                self.create_ddocs_and_views()
                gen_load = BlobGenerator('upgrade', 'upgrade-', self.value_size,
                                         end=self.num_items)
                self._load_all_buckets(self.master, gen_load, "create", self.expire_time,
                                       flag=self.item_flag)

        # deleting buckets after upgrade
        if after_upgrade_buckets_out is not False:
            self._all_buckets_delete(self.master)
        # flushing buckets after upgrade
        if after_upgrade_buckets_flush is not False:
            self._all_buckets_flush()

    def run_view_queries(self):
        """Build (but do not start) a thread that runs the view-query loop.

        Returns:
            Thread: an unstarted thread targeting ``self.view_queries`` with
            ``self.run_view_query_iterations`` as its single argument; the
            caller is responsible for start()/join().
        """
        return Thread(target=self.view_queries,
                      name="run_queries",
                      args=(self.run_view_query_iterations,))

    def view_queries(self, iterations):
        """Query every default view ``iterations`` times.

        Args:
            iterations (int): number of passes over all ``self.view_num``
                views of the first design doc.
        """
        params = {"connectionTimeout": 60000}
        for _ in range(iterations):
            for idx in range(self.view_num):
                view_name = self.default_view_name + str(idx)
                self.cluster.query_view(self.master, self.ddocs[0].name,
                                        view_name, params,
                                        expected_rows=None, bucket="default",
                                        retry_time=2)

    def create_user(self, node):
        """Create the builtin RBAC user ``cbadminbucket`` with the admin
        role on *node*."""
        self.log.info("inside create_user")
        user_defs = [{'id': 'cbadminbucket', 'name': 'cbadminbucket',
                      'password': '******'}]
        role_defs = [{'id': 'cbadminbucket', 'name': 'cbadminbucket',
                      'roles': 'admin'}]
        self.log.info("before create_user_source")
        RbacBase().create_user_source(user_defs, 'builtin', node)
        self.log.info("before add_user_role")
        RbacBase().add_user_role(role_defs, RestConnection(node), 'builtin')
Beispiel #28
0
    def test_continuous_unidirectional_deletes_1(self):
        """Unidirectional XDCR where source-side deletes win conflict resolution.

        Loads the same keys on both clusters but mutates them three times on
        cluster a (higher seqnos) and once on cluster b, deletes on both
        sides, then replicates a -> b and verifies via the changes feed that
        cluster a's deletes prevail.
        """
        cluster_ref_a = "cluster_ref_a"
        master_a = self._input.clusters.get(0)[0]
        rest_conn_a = RestConnection(master_a)

        cluster_ref_b = "cluster_ref_b"
        master_b = self._input.clusters.get(1)[0]
        rest_conn_b = RestConnection(master_b)

        # Load some data on cluster a. Do it a few times so that the seqnos are
        # bumped up and then delete it.
        kvstore = ClientKeyValueStore()
        self._params["ops"] = "set"
        load_thread_list = []
        for i in [1, 2, 3]:
            task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
            load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
                                                            self._buckets[0],
                                                            task_def, kvstore)
            load_thread_list.append(load_thread)

        for lt in load_thread_list:
            lt.start()
        for lt in load_thread_list:
            lt.join()
        # Let the mutations settle before issuing the deletes.
        time.sleep(10)

        self._params["ops"] = "delete"
        task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
        load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
                                                        self._buckets[0],
                                                        task_def, kvstore)
        load_thread.start()
        load_thread.join()

        # Load the same data on cluster b but only once. This will cause the
        # seqno values to be lower than those on cluster a allowing the latter
        # to win during conflict resolution later. Then delete this data, too.
        kvstore = ClientKeyValueStore()
        self._params["ops"] = "set"
        task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
        load_thread = RebalanceDataGenerator.start_load(rest_conn_b,
                                                        self._buckets[0],
                                                        task_def, kvstore)
        load_thread.start()
        load_thread.join()
        time.sleep(10)

        self._params["ops"] = "delete"
        task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
        load_thread = RebalanceDataGenerator.start_load(rest_conn_b,
                                                        self._buckets[0],
                                                        task_def, kvstore)
        load_thread.start()
        load_thread.join()

        # Start replication to replicate the deletes from cluster a (having
        # higher seqnos) to cluster b.
        replication_type = "continuous"
        rest_conn_a.add_remote_cluster(master_b.ip, master_b.port,
                                       master_b.rest_username,
                                       master_b.rest_password, cluster_ref_b)
        (rep_database, rep_id) = rest_conn_a.start_replication(replication_type,
                                                               self._buckets[0],
                                                               cluster_ref_b)
        # Track the replication so teardown can remove it.
        self._state.append((rest_conn_a, cluster_ref_b, rep_database, rep_id))

        # Give the continuous replication time to propagate the deletes.
        time.sleep(15)

        # Verify replicated data
        self.assertTrue(XDCRBaseTest.verify_changes_feed(rest_conn_a,
                                                         rest_conn_b,
                                                         self._buckets[0],
                                                         self._poll_sleep,
                                                         self._poll_timeout),
                        "Changes feed verification failed")
Beispiel #29
0
    def test_continuous_bidirectional_sets(self):
        """Bi-directional continuous XDCR with overlapping key ranges.

        Each cluster gets an exclusive key range plus two shared ranges.
        Shared keys mutated three times gain higher seqnos and win conflict
        resolution; their single-mutation counterparts lose.  Verifies that
        both clusters converge on the winning data and matching revisions.
        """
        cluster_ref_a = "cluster_ref_a"
        master_a = self._input.clusters.get(0)[0]
        rest_conn_a = RestConnection(master_a)

        cluster_ref_b = "cluster_ref_b"
        master_b = self._input.clusters.get(1)[0]
        rest_conn_b = RestConnection(master_b)

        load_thread_list = []

        # Load cluster a with keys that will be exclusively owned by it
        kvstore_a0 = ClientKeyValueStore()
        self._params["ops"] = "set"
        self._params["seed"] = "cluster-a"
        # Floor division: "count" must remain an int under Python 3.
        self._params["count"] = self._num_items // 4
        task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
        load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
                                                        self._buckets[0],
                                                        task_def, kvstore_a0)
        load_thread_list.append(load_thread)

        # Load cluster a with keys that it will share with cluster b and which
        # will win during conflict resolution
        kvstore_a1 = ClientKeyValueStore()
        self._params["ops"] = "set"
        self._params["seed"] = "cluster-a-wins"
        self._params["padding"] = "cluster-a-wins"
        self._params["count"] = self._num_items // 4

        # Mutating these keys several times will increase their seqnos and allow
        # them to win during conflict resolution
        for i in [1, 2, 3]:
            task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
            load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
                                                            self._buckets[0],
                                                            task_def,
                                                            kvstore_a1)
            load_thread_list.append(load_thread)

        # Load cluster a with keys that it will share with cluster b but which
        # will lose during conflict resolution
        kvstore_a2 = ClientKeyValueStore()
        self._params["ops"] = "set"
        self._params["seed"] = "cluster-b-wins"
        self._params["padding"] = "cluster-a-loses"
        self._params["count"] = self._num_items // 4
        task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
        load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
                                                        self._buckets[0],
                                                        task_def, kvstore_a2)
        load_thread_list.append(load_thread)

        # Load cluster b with keys that will be exclusively owned by it
        kvstore_b0 = ClientKeyValueStore()
        self._params["ops"] = "set"
        self._params["seed"] = "cluster-b"
        self._params["count"] = self._num_items // 4
        task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
        load_thread = RebalanceDataGenerator.start_load(rest_conn_b,
                                                        self._buckets[0],
                                                        task_def, kvstore_b0)
        load_thread_list.append(load_thread)

        # Load cluster b with keys that it will share with cluster a and which
        # will win during conflict resolution
        kvstore_b1 = ClientKeyValueStore()
        self._params["ops"] = "set"
        self._params["seed"] = "cluster-b-wins"
        self._params["padding"] = "cluster-b-wins"
        self._params["count"] = self._num_items // 4

        # Mutating these keys several times will increase their seqnos and allow
        # them to win during conflict resolution
        for i in [1, 2, 3]:
            task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
            load_thread = RebalanceDataGenerator.start_load(rest_conn_b,
                                                            self._buckets[0],
                                                            task_def,
                                                            kvstore_b1)
            load_thread_list.append(load_thread)

        # Load cluster b with keys that it will share with cluster a but which
        # will lose during conflict resolution
        kvstore_b2 = ClientKeyValueStore()
        self._params["ops"] = "set"
        self._params["seed"] = "cluster-a-wins"
        self._params["padding"] = "cluster-b-loses"
        self._params["count"] = self._num_items // 4
        task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
        load_thread = RebalanceDataGenerator.start_load(rest_conn_b,
                                                        self._buckets[0],
                                                        task_def, kvstore_b2)
        load_thread_list.append(load_thread)

        # Setup bidirectional replication between cluster a and cluster b
        replication_type = "continuous"
        rest_conn_a.add_remote_cluster(master_b.ip, master_b.port,
                                       master_b.rest_username,
                                       master_b.rest_password, cluster_ref_b)
        rest_conn_b.add_remote_cluster(master_a.ip, master_a.port,
                                       master_a.rest_username,
                                       master_a.rest_password, cluster_ref_a)
        (rep_database_a, rep_id_a) = rest_conn_a.start_replication(
                                        replication_type, self._buckets[0],
                                        cluster_ref_b)
        (rep_database_b, rep_id_b) = rest_conn_b.start_replication(
                                        replication_type, self._buckets[0],
                                        cluster_ref_a)
        # Track both replications so teardown can remove them.
        self._state.append((rest_conn_a, cluster_ref_b, rep_database_a, rep_id_a))
        self._state.append((rest_conn_b, cluster_ref_a, rep_database_b, rep_id_b))

        # Start all loads concurrently and wait for them to end
        for lt in load_thread_list:
            lt.start()
        for lt in load_thread_list:
            lt.join()
        self.log.info("All loading threads finished")

        # Verify the winning kvstores on both sides, then compare revisions.
        for rest_conn in [rest_conn_a, rest_conn_b]:
            for kvstore in [kvstore_a0, kvstore_a1, kvstore_b0, kvstore_b1]:
                self.assertTrue(
                    XDCRBaseTest.verify_replicated_data(rest_conn,
                                                        self._buckets[0],
                                                        kvstore,
                                                        self._poll_sleep,
                                                        self._poll_timeout),
                    "Verification of replicated data failed")
        self.assertTrue(XDCRBaseTest.verify_replicated_revs(rest_conn_a,
                                                            rest_conn_b,
                                                            self._buckets[0],
                                                            self._poll_sleep,
                                                            self._poll_timeout),
                        "Verification of replicated revisions failed")
Beispiel #30
0
    def test_incremental_rebalance_in_continuous_bidirectional_sets_deletes(self):
        """Bi-directional XDCR under incremental rebalance-in on both clusters.

        Sets up continuous a<->b replication, loads sets then deletes a fifth
        of the items while nodes are rebalanced into each cluster one at a
        time, and finally verifies replicated data and revisions.
        """
        cluster_ref_a = "cluster_ref_a"
        master_a = self._input.clusters.get(0)[0]
        rest_conn_a = RestConnection(master_a)

        cluster_ref_b = "cluster_ref_b"
        master_b = self._input.clusters.get(1)[0]
        rest_conn_b = RestConnection(master_b)

        # Setup bi-directional continuous replication
        replication_type = "continuous"
        rest_conn_a.add_remote_cluster(master_b.ip, master_b.port,
            master_b.rest_username,
            master_b.rest_password, cluster_ref_b)
        rest_conn_b.add_remote_cluster(master_a.ip, master_a.port,
            master_a.rest_username,
            master_a.rest_password, cluster_ref_a)
        (rep_database_a, rep_id_a) = rest_conn_a.start_replication(
            replication_type, self._buckets[0],
            cluster_ref_b)
        (rep_database_b, rep_id_b) = rest_conn_b.start_replication(
            replication_type, self._buckets[0],
            cluster_ref_a)
        # Track both replications so teardown can remove them.
        self._state.append((rest_conn_a, cluster_ref_b, rep_database_a, rep_id_a))
        self._state.append((rest_conn_b, cluster_ref_a, rep_database_b, rep_id_b))

        load_thread_list = []
        # Start load
        kvstore = ClientKeyValueStore()
        self._params["ops"] = "set"
        task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
        load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
            self._buckets[0],
            task_def, kvstore)
        load_thread.start()
        load_thread.join()

        # Delete a fifth of the items concurrently with the rebalances below.
        self._params["ops"] = "delete"
        # Floor division: "count" must remain an int under Python 3.
        self._params["count"] = self._num_items // 5
        task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
        load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
            self._buckets[0],
            task_def, kvstore)
        load_thread_list.append(load_thread)

        # Start all loads concurrently
        for lt in load_thread_list:
            lt.start()

        # Trigger rebalance on both source and destination clusters
        servers_a = self._input.clusters.get(0)
        servers_b = self._input.clusters.get(1)
        nodes_a = rest_conn_a.node_statuses()
        nodes_b = rest_conn_b.node_statuses()
        rebalanced_servers_a = [master_a]
        rebalanced_servers_b = [master_b]
        which_servers_a = []
        which_servers_b = []

        # Incremental rebalance in one node in cluster_a, then cluster_b
        while len(nodes_a) < len(servers_a):
            self.log.info("current nodes : {0}".format([node.id for node in rest_conn_a.node_statuses()]))
            rebalanced_in, which_servers_a = RebalanceHelper.rebalance_in(servers_a, 1, monitor=False)
            self.assertTrue(rebalanced_in, msg="unable to add and rebalance more nodes")
            self.assertTrue(rest_conn_a.monitorRebalance(),
                msg="rebalance operation on cluster {0}".format(nodes_a))

            # Rebalance in at most one cluster-b node per cluster-a iteration;
            # its rebalance is monitored after the break below.
            while len(nodes_b) < len(servers_b):
                self.log.info("current nodes : {0}".format([node.id for node in rest_conn_b.node_statuses()]))
                rebalanced_in, which_servers_b = RebalanceHelper.rebalance_in(servers_b, 1, monitor=False)
                self.assertTrue(rebalanced_in, msg="unable to add and rebalance more nodes")
                break
            self.assertTrue(rest_conn_b.monitorRebalance(),
                    msg="rebalance operation on cluster {0}".format(nodes_b))
            rebalanced_servers_b.extend(which_servers_b)
            nodes_b = rest_conn_b.node_statuses()
            rebalanced_servers_a.extend(which_servers_a)
            nodes_a = rest_conn_a.node_statuses()

        # Wait for loading threads to finish
        for lt in load_thread_list:
            lt.join()
        self.log.info("All loading threads finished")

        # Verify replication
        self.assertTrue(XDCRBaseTest.verify_replicated_data(rest_conn_b,
            self._buckets[0],
            kvstore,
            self._poll_sleep,
            self._poll_timeout),
            "Verification of replicated data failed")
        self.assertTrue(XDCRBaseTest.verify_replicated_revs(rest_conn_a,
            rest_conn_b,
            self._buckets[0],
            self._poll_sleep,
            self._poll_timeout),
            "Verification of replicated revisions failed")
Beispiel #31
0
    def test_incremental_rebalance_out_continuous_bidirectional_sets_deletes(self):
        """Bi-directional XDCR under incremental rebalance-out on both clusters.

        Sets up continuous a<->b replication, grows both clusters to full
        size, loads sets then deletes a fifth of the items while nodes are
        rebalanced out one at a time, and verifies replicated data/revisions.
        """
        cluster_ref_a = "cluster_ref_a"
        master_a = self._input.clusters.get(0)[0]
        rest_conn_a = RestConnection(master_a)

        cluster_ref_b = "cluster_ref_b"
        master_b = self._input.clusters.get(1)[0]
        rest_conn_b = RestConnection(master_b)

        # Setup bi-directional continuous replication
        replication_type = "continuous"

        rest_conn_a.add_remote_cluster(master_b.ip, master_b.port,
            master_b.rest_username,
            master_b.rest_password, cluster_ref_b)
        rest_conn_b.add_remote_cluster(master_a.ip, master_a.port,
            master_a.rest_username,
            master_a.rest_password, cluster_ref_a)
        (rep_database_a, rep_id_a) = rest_conn_a.start_replication(
            replication_type, self._buckets[0],
            cluster_ref_b)
        (rep_database_b, rep_id_b) = rest_conn_b.start_replication(
            replication_type, self._buckets[0],
            cluster_ref_a)
        # Track both replications so teardown can remove them.
        self._state.append((rest_conn_a, cluster_ref_b, rep_database_a, rep_id_a))
        self._state.append((rest_conn_b, cluster_ref_a, rep_database_b, rep_id_b))

        load_thread_list = []
        # Start load
        kvstore = ClientKeyValueStore()

        self._params["ops"] = "set"
        task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
        load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
            self._buckets[0],
            task_def, kvstore)
        load_thread.start()
        load_thread.join()

        # Delete a fifth of the items concurrently with the rebalances below.
        self._params["ops"] = "delete"
        # Floor division: "count" must remain an int under Python 3.
        self._params["count"] = self._num_items // 5
        task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
        load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
            self._buckets[0],
            task_def, kvstore)
        load_thread_list.append(load_thread)

        # Start all loads concurrently
        for lt in load_thread_list:
            lt.start()

        # Trigger rebalance on both source and destination clusters
        servers_a = self._input.clusters.get(0)
        servers_b = self._input.clusters.get(1)
        rebalanced_servers_a = []
        rebalanced_servers_b = []
        which_servers_a = []
        which_servers_b = []

        # Rebalance all the nodes together
        RebalanceHelper.rebalance_in(servers_a, len(servers_a)-1)

        RebalanceHelper.rebalance_in(servers_b, len(servers_b)-1)
        rebalanced_servers_a.extend(servers_a)
        rebalanced_servers_b.extend(servers_b)

        nodes_a = rest_conn_a.node_statuses()
        nodes_b = rest_conn_b.node_statuses()
        # Incremental rebalance out one node in cluster_a, then cluster_b
        while len(nodes_a) > 1:
            toBeEjectedNode = RebalanceHelper.pick_node(master_a)

            self.log.info("current nodes : {0}".format(RebalanceHelper.getOtpNodeIds(master_a)))
            self.log.info("removing node {0} and rebalance afterwards".format(toBeEjectedNode.id))
            rest_conn_a.rebalance(otpNodes=[node.id for node in rest_conn_a.node_statuses()], \
                ejectedNodes=[toBeEjectedNode.id])
            self.assertTrue(rest_conn_a.monitorRebalance(),
                msg="rebalance operation failed after adding node {0}".format(toBeEjectedNode.id))

            # Eject at most one cluster-b node per cluster-a iteration.
            while len(nodes_b) > 1:
                toBeEjectedNode = RebalanceHelper.pick_node(master_b)
                self.log.info("current nodes : {0}".format(RebalanceHelper.getOtpNodeIds(master_b)))
                self.log.info("removing node {0} and rebalance afterwards".format(toBeEjectedNode.id))
                rest_conn_b.rebalance(otpNodes=[node.id for node in rest_conn_b.node_statuses()], \
                    ejectedNodes=[toBeEjectedNode.id])
                self.assertTrue(rest_conn_b.monitorRebalance(),
                    msg="rebalance operation failed after adding node {0}".format(toBeEjectedNode.id))
                break

            # Drop ejected cluster-b servers from the tracking list.
            for node in nodes_b:
                for rebalanced_server in rebalanced_servers_b:
                    if rebalanced_server.ip.find(node.ip) != -1:
                        rebalanced_servers_b.remove(rebalanced_server)
                        break
            # Fix: was rest_conn_a, which tracked the wrong cluster and
            # broke the inner loop's termination condition.
            nodes_b = rest_conn_b.node_statuses()

            # Drop ejected cluster-a servers from the tracking list.
            # NOTE(review): this pass runs twice below; kept as-is since the
            # second pass can remove further duplicate matches per node.
            for node in nodes_a:
                for rebalanced_server in rebalanced_servers_a:
                    if rebalanced_server.ip.find(node.ip) != -1:
                        rebalanced_servers_a.remove(rebalanced_server)
                        break
            nodes_a = rest_conn_a.node_statuses()

            for node in nodes_a:
                for rebalanced_server in rebalanced_servers_a:
                    if rebalanced_server.ip.find(node.ip) != -1:
                        rebalanced_servers_a.remove(rebalanced_server)
                        break

            nodes_a = rest_conn_a.node_statuses()

        # Wait for loading threads to finish
        for lt in load_thread_list:
            lt.join()
        self.log.info("All loading threads finished")
        # Verify replication
        self.assertTrue(XDCRBaseTest.verify_replicated_data(rest_conn_b,
            self._buckets[0],
            kvstore,
            self._poll_sleep,
            self._poll_timeout),
            "Verification of replicated data failed")
        self.assertTrue(XDCRBaseTest.verify_replicated_revs(rest_conn_a,
            rest_conn_b,
            self._buckets[0],
            self._poll_sleep,
            self._poll_timeout),
            "Verification of replicated revisions failed")
Beispiel #32
0
    def test_failover_continuous_bidirectional_sets_deletes(self):
        """Bi-directional XDCR surviving node failovers on both clusters.

        Grows both clusters, sets up continuous a<->b replication, loads
        sets then deletes a fifth of the items while nodes (optionally the
        orchestrator) are failed over and rebalanced out on each side, and
        verifies replicated data and revisions.
        """
        cluster_ref_a = "cluster_ref_a"
        master_a = self._input.clusters.get(0)[0]
        rest_conn_a = RestConnection(master_a)

        cluster_ref_b = "cluster_ref_b"
        master_b = self._input.clusters.get(1)[0]
        rest_conn_b = RestConnection(master_b)

        # Rebalance all the nodes together
        servers_a = self._input.clusters.get(0)
        servers_b = self._input.clusters.get(1)
        rebalanced_servers_a = []
        rebalanced_servers_b = []

        RebalanceHelper.rebalance_in(servers_a, len(servers_a)-1)
        RebalanceHelper.rebalance_in(servers_b, len(servers_b)-1)
        rebalanced_servers_a.extend(servers_a)
        rebalanced_servers_b.extend(servers_b)

        # Setup bi-directional continuous replication
        replication_type = "continuous"

        rest_conn_a.add_remote_cluster(master_b.ip, master_b.port,
            master_b.rest_username,
            master_b.rest_password, cluster_ref_b)
        rest_conn_b.add_remote_cluster(master_a.ip, master_a.port,
            master_a.rest_username,
            master_a.rest_password, cluster_ref_a)
        (rep_database_a, rep_id_a) = rest_conn_a.start_replication(
            replication_type, self._buckets[0],
            cluster_ref_b)
        (rep_database_b, rep_id_b) = rest_conn_b.start_replication(
            replication_type, self._buckets[0],
            cluster_ref_a)

        load_thread_list = []
        # Start load
        kvstore = ClientKeyValueStore()

        self._params["ops"] = "set"
        task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
        load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
            self._buckets[0],
            task_def, kvstore)
        load_thread.start()
        load_thread.join()
        # Ensure the loaded items hit disk before failing nodes over.
        RebalanceHelper.wait_for_persistence(master_a, self._buckets[0])

        # Delete a fifth of the items concurrently with the failovers below.
        self._params["ops"] = "delete"
        # Floor division: "count" must remain an int under Python 3.
        self._params["count"] = self._num_items // 5
        task_def = RebalanceDataGenerator.create_loading_tasks(self._params)
        load_thread = RebalanceDataGenerator.start_load(rest_conn_a,
            self._buckets[0],
            task_def, kvstore)
        load_thread_list.append(load_thread)

        # Start all loads concurrently
        for lt in load_thread_list:
            lt.start()

        # Do the failover of nodes on both clusters
        self.log.info("Failing over nodes")
        self.log.info("current nodes on cluster 1: {0}".format(RebalanceHelper.getOtpNodeIds(master_a)))
        self.log.info("current nodes on cluster 2: {0}".format(RebalanceHelper.getOtpNodeIds(master_b)))

        # Find nodes to be failed_over
        toBeEjectedNodes = RebalanceHelper.pick_nodes(master_a, howmany=self._failover_factor)
        optNodesIds_a = [node.id for node in toBeEjectedNodes]
        if self._fail_orchestrator_a:
            # Target the orchestrator itself and re-point REST at the last
            # node, which survives the failover.
            status, content = ClusterOperationHelper.find_orchestrator(master_a)
            self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
            format(status, content))
            optNodesIds_a[0] = content
            master_a = self._input.clusters.get(0)[-1]
            rest_conn_a = RestConnection(master_a)

        #Failover selected nodes
        for node in optNodesIds_a:
            self.log.info("failover node {0} and rebalance afterwards".format(node))
            rest_conn_a.fail_over(node)

        toBeEjectedNodes = RebalanceHelper.pick_nodes(master_b, howmany=self._failover_factor)
        optNodesIds_b = [node.id for node in toBeEjectedNodes]
        if self._fail_orchestrator_b:
            status, content = ClusterOperationHelper.find_orchestrator(master_b)
            self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
            format(status, content))
            optNodesIds_b[0] = content
            master_b = self._input.clusters.get(1)[-1]
            rest_conn_b = RestConnection(master_b)

        # Track both replications (with the possibly re-pointed connections)
        # so teardown can remove them.
        self._state.append((rest_conn_a, cluster_ref_b, rep_database_a, rep_id_a))
        self._state.append((rest_conn_b, cluster_ref_a, rep_database_b, rep_id_b))

        #Failover selected nodes
        for node in optNodesIds_b:
            self.log.info("failover node {0} and rebalance afterwards".format(node))
            rest_conn_b.fail_over(node)

        rest_conn_a.rebalance(otpNodes=[node.id for node in rest_conn_a.node_statuses()], \
            ejectedNodes=optNodesIds_a)
        rest_conn_b.rebalance(otpNodes=[node.id for node in rest_conn_b.node_statuses()], \
            ejectedNodes=optNodesIds_b)

        self.assertTrue(rest_conn_a.monitorRebalance(),
            msg="rebalance operation failed after adding node on cluster 1")
        self.assertTrue(rest_conn_b.monitorRebalance(),
            msg="rebalance operation failed after adding node on cluster 2")

        # Wait for loading threads to finish
        for lt in load_thread_list:
            lt.join()
        self.log.info("All loading threads finished")

        # Verify replication
        self.assertTrue(XDCRBaseTest.verify_replicated_data(rest_conn_b,
            self._buckets[0],
            kvstore,
            self._poll_sleep,
            self._poll_timeout),
            "Verification of replicated data failed")
        self.assertTrue(XDCRBaseTest.verify_replicated_revs(rest_conn_a,
            rest_conn_b,
            self._buckets[0],
            self._poll_sleep,
            self._poll_timeout),
            "Verification of replicated revisions failed")
Beispiel #33
0
class DoctorXDCR():
    def __init__(self, src_cluster, dest_cluster):
        """Bind a source/destination cluster pair for XDCR management.

        Opens a REST connection to each cluster's master node; all
        replication operations are driven through the source side.
        """
        self.src_cluster = src_cluster
        self.dest_cluster = dest_cluster
        self.src_master = src_cluster.master
        self.dest_master = dest_cluster.master
        self.src_rest = RestConnection(self.src_master)
        self.dest_rest = RestConnection(self.dest_master)
        # XDCReplication objects currently active on src_cluster
        self.replications = []

    def create_remote_ref(self, name="remote"):
        '''
        Create remote cluster reference from src_cluster to remote_cluster
        :return: None
        '''
        self.src_rest.add_remote_cluster(self.dest_master.ip,
                                         self.dest_master.port,
                                         self.dest_master.rest_username,
                                         self.dest_master.rest_password, name)

    def create_replication(self, remote_ref, src_bkt, dest_bkt):
        '''
        Create replication in the src cluster to replicate data to dest cluster
        '''
        repl_id = self.src_rest.start_replication("continuous", src_bkt,
                                                  remote_ref, "xmem", dest_bkt)
        if repl_id is not None:
            self.replications.append(
                XDCReplication(remote_ref, self.src_cluster, self.dest_cluster,
                               src_bkt, dest_bkt, repl_id))

    def set_xdcr_param(self, src_bkt, dest_bkt, param, value):
        """Set a replication setting to a value
        """
        self.src_rest.set_xdcr_param(src_bkt, dest_bkt, param, value)

    def drop_replication_by_id(self, repl_id):
        '''
        Drop replication in the src cluster
        '''
        self.src_rest.stop_replication(repl_id)
        for repl in self.replications:
            if repl_id == repl.repl_id:
                self.replications.remove(repl)

    def drop_all_replications(self):
        self.src_rest.remove_all_replications()
        self.replications = []

    def delete_remote_ref(self, name):
        self.src_rest.remove_remote_cluster(name)

    def monitor_replications(self, duration=0, print_duration=600):
        st_time = time.time()
        update_time = time.time()
        if duration == 0:
            while not self.stop_run:
                if st_time + print_duration < time.time():
                    for current_repl in self.src_rest.get_replications():
                        for repl in self.replications:
                            if current_repl['id'] == repl.repl_id:
                                print(
                                    "Source bucket {0} doc count = {1}\nDest bucket {2} doc count = {3}"
                                    .format(
                                        repl.src_bkt,
                                        self.bucket_util.
                                        get_bucket_current_item_count(
                                            repl.src_cluster,
                                            repl.src_bkt), repl.dest_bkt,
                                        self.bucket_util.
                                        get_bucket_current_item_count(
                                            repl.dest_cluster, repl.src_bkt)))
        else:
            while st_time + duration > time.time():
                if update_time + print_duration < time.time():
                    update_time = time.time()