Example #1
    def test_XDCR_with_ldap_setup_half_encryption(self):
        rest2 = RestConnection(self.servers[1])
        rest1 = RestConnection(self.servers[0])

        rest2.remove_all_replications()
        rest2.remove_all_remote_clusters()

        rest2.create_bucket("default", ramQuotaMB=100)
        rest1.create_bucket("default", ramQuotaMB=100)
        remote_cluster2 = 'C2'
        remote_server01 = self.servers[0]

        remote_id = rest2.add_remote_cluster(remote_server01.ip,
                                             8091,
                                             'cbadminbucket',
                                             'password',
                                             remote_cluster2,
                                             demandEncryption="on",
                                             encryptionType="half")
        replication_id = rest2.start_replication('continuous', 'default',
                                                 remote_cluster2)
        self.assertTrue(replication_id is not None,
                        "Replication was not created successfully")

        rest2.remove_all_replications()
        rest2.remove_all_remote_clusters()
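These snippets are lifted from Couchbase's testrunner suite and are not self-contained. A hedged sketch of the imports a test like the one above typically relies on (module paths assumed from the upstream repo layout, not verified against any particular revision):

import time
import uuid

# Assumed testrunner module paths; adjust to the actual checkout.
from membase.api.rest_client import RestConnection
from membase.helper.bucket_helper import BucketOperationHelper
from membase.helper.cluster_helper import ClusterOperationHelper
from remote.remote_util import RemoteMachineShellConnection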
Example #2
    def test_basic_xdcr_with_cert_regenerate(self):

        cluster1 = self.servers[0:2]
        cluster2 = self.servers[2:4]
        remote_cluster_name = 'sslcluster'
        restCluster1 = RestConnection(cluster1[0])
        restCluster2 = RestConnection(cluster2[0])

        try:
            #Setup cluster1
            x509main(cluster1[0]).setup_master()
            x509main(cluster1[1])._setup_node_certificates(reload_cert=False)

            restCluster1.add_node('Administrator','password',cluster1[1].ip)
            known_nodes = ['ns_1@'+cluster1[0].ip,'ns_1@' + cluster1[1].ip]
            restCluster1.rebalance(known_nodes)
            self.assertTrue(self.check_rebalance_complete(restCluster1),"Issue with rebalance")
            restCluster1.create_bucket(bucket='default', ramQuotaMB=100)
            restCluster1.remove_all_replications()
            restCluster1.remove_all_remote_clusters()

            #Setup cluster2
            x509main(cluster2[0]).setup_master()
            x509main(cluster2[1])._setup_node_certificates(reload_cert=False)

            restCluster2.add_node('Administrator','password',cluster2[1].ip)
            known_nodes = ['ns_1@'+cluster2[0].ip,'ns_1@' + cluster2[1].ip]
            restCluster2.rebalance(known_nodes)
            self.assertTrue(self.check_rebalance_complete(restCluster2),"Issue with rebalance")
            restCluster2.create_bucket(bucket='default', ramQuotaMB=100)

            cert_path = x509main.CACERTFILEPATH + x509main.CACERTFILE
            with open(cert_path, 'rb') as cert_file:
                data = cert_file.read()
            restCluster1.add_remote_cluster(cluster2[0].ip,cluster2[0].port,'Administrator','password',remote_cluster_name,certificate=data)
            replication_id = restCluster1.start_replication('continuous','default',remote_cluster_name)

            #restCluster1.set_xdcr_param('default','default','pauseRequested',True)

            x509main(self.master)._delete_inbox_folder()
            x509main(self.master)._generate_cert(self.servers, root_cn="CB\\ Authority")
            self.log.info ("Setting up the first cluster for new certificate")

            x509main(cluster1[0]).setup_master()
            x509main(cluster1[1])._setup_node_certificates(reload_cert=False)
            self.log.info ("Setting up the second cluster for new certificate")
            x509main(cluster2[0]).setup_master()
            x509main(cluster2[1])._setup_node_certificates(reload_cert=False)

            status = restCluster1.is_replication_paused('default','default')
            if not status:
                restCluster1.set_xdcr_param('default','default','pauseRequested',False)

            restCluster1.set_xdcr_param('default','default','pauseRequested',True)
            status = restCluster1.is_replication_paused('default','default')
            self.assertTrue(status, "Replication could not be paused after the certificate was regenerated")
        finally:
            known_nodes = ['ns_1@'+cluster2[0].ip,'ns_1@'+cluster2[1].ip]
            restCluster2.rebalance(known_nodes,['ns_1@' + cluster2[1].ip])
            self.assertTrue(self.check_rebalance_complete(restCluster2),"Issue with rebalance")
            restCluster2.delete_bucket()
Example #3
    def test_folderMisMatchCluster(self):
        auditIns = audit(host=self.master)
        originalPath = auditIns.getAuditLogPath()
        newPath = originalPath + 'testFolderMisMatch'
        shell = RemoteMachineShellConnection(self.servers[0])
        try:
            shell.create_directory(newPath)
            command = 'chown couchbase:couchbase ' + newPath
            shell.execute_command(command)
        finally:
            shell.disconnect()

        auditIns.setAuditLogPath(newPath)

        for server in self.servers:
            rest = RestConnection(server)
            #Create an Event for Bucket Creation
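            # 'source' and 'user' are assumed to be defined earlier in the original test class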
            expectedResults = {'name':'TestBucket ' + server.ip, 'ram_quota':536870912, 'num_replicas':1,
                                       'replica_index':False, 'eviction_policy':'value_only', 'type':'membase', \
                                       'auth_type':'sasl', "autocompaction":'false', "purge_interval":"undefined", \
                                        "flush_enabled":False, "num_threads":3, "source":source, \
                                       "user":user, "ip":self.ipAddress, "port":57457, 'sessionid':'' }
            rest.create_bucket(expectedResults['name'], expectedResults['ram_quota'] // 1048576, expectedResults['auth_type'], 'password', expectedResults['num_replicas'], \
                                       '11211', 'membase', 0, expectedResults['num_threads'], expectedResults['flush_enabled'], 'valueOnly')

            #Check on Events
            try:
                self.checkConfig(self.eventID, self.servers[0], expectedResults)
            except Exception:
                self.log.info ("Issue reading the file at Node {0}".format(server.ip))
Example #5
 def test_bucket_create_password(self, bucket_name='secretsbucket', num_replicas=1, bucket_size=100):
     for server in self.servers:
         self.secretmgmt_base_obj.setup_pass_node(server, self.password)
     bucket_type = self.input.param("bucket_type", 'couchbase')
     tasks = []
     if bucket_type == 'couchbase':
         # self.cluster.create_sasl_bucket(self.master, bucket_name, self.password, num_replicas)
         rest = RestConnection(self.master)
         rest.create_bucket(bucket_name, ramQuotaMB=100)
     elif bucket_type == 'standard':
         self.cluster.create_standard_bucket(self.master, bucket_name, STANDARD_BUCKET_PORT + 1,
                                             bucket_size)
     elif bucket_type == "memcached":
         tasks.append(
             self.cluster.async_create_memcached_bucket(self.master, bucket_name, STANDARD_BUCKET_PORT + 1,
                                                        bucket_size))
         for task in tasks:
             self.assertTrue(task.result(), "Issue with bucket creation")
     else:
         self.log.error('Bucket type not specified')
         return
     self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(bucket_name, RestConnection(self.master)),
                     msg='failed to start up bucket with name "{0}"'.format(bucket_name))
     gen_load = BlobGenerator('buckettest', 'buckettest-', self.value_size, start=0, end=self.num_items)
     self._load_all_buckets(self.master, gen_load, "create", 0)
     install_path = self.secretmgmt_base_obj._get_install_path(self.master)
     temp_result = self.secretmgmt_base_obj.check_config_files(self.master, install_path, '/config/config.dat',
                                                               self.password)
     self.assertTrue(temp_result, "Password found in config.dat")
     temp_result = self.secretmgmt_base_obj.check_config_files(self.master, install_path, 'isasl.pw', self.password)
     self.assertTrue(temp_result, "Password found in isasl.pw")
Example #6
    def _XDCR_role_test(self):
        params = {}
        remote_cluster_name = 'rbac_cluster'
        remote_server01 = self.servers[1]
        remote_server02 = self.servers[2]
        read_role = '_replication_admin_read'
        write_role = '_replication_admin_write'
        rest_remote01 = RestConnection(remote_server01)
        rest_remote01.create_bucket(bucket='default', ramQuotaMB=100)
        rest_remote02 = RestConnection(remote_server02)
        rest_remote02.create_bucket(bucket='default', ramQuotaMB=100)

        #------ First Test the Get Requests for XDCR --------------#

        #Remove all remote cluster references
        self.rest.remove_all_replications()
        self.rest.remove_all_remote_clusters()

        #Add remote cluster reference and replications
        self.rest.add_remote_cluster(remote_server01.ip,8091,'Administrator','password',remote_cluster_name)
        replication_id = self.rest.start_replication('continuous','default',remote_cluster_name)
        masDict,tc_status = self.rbac._iterate_role_mapping(read_role,"Administrator","password",{'replication_id':replication_id})

        self.rest.remove_all_replications()
        self.rest.remove_all_remote_clusters()
        rest_remote01.remove_all_replications()
        rest_remote01.remove_all_remote_clusters()
        rest_remote02.remove_all_replications()
        rest_remote02.remove_all_remote_clusters()


        # ----------- Second Test for POST requests for XDCR ---------------#

        self.rest.remove_all_replications()
        self.rest.remove_all_remote_clusters()
        rest_remote01.remove_all_replications()
        rest_remote01.remove_all_remote_clusters()
        rest_remote02.remove_all_replications()
        rest_remote02.remove_all_remote_clusters()


        self.rest.add_remote_cluster(remote_server01.ip,8091,'Administrator','password',"onetotwo")
        #self.rest.add_remote_cluster(remote_server02.ip,8091,'Administrator','password','onetothree')
        #rest_remote01.add_remote_cluster(remote_server02.ip,8091,'Administrator','password',"twotothree")
        rest_remote01.add_remote_cluster(self.master.ip,8091,'Administrator','password','twotoone')
        rest_remote02.add_remote_cluster(remote_server01.ip,8091,'Administrator','password',"threetotwo")
        rest_remote02.add_remote_cluster(self.master.ip,8091,'Administrator','password','threetoone')

        params['remote_cluster_name']='onetotwo'
        params['remoteCluster01'] = {'username': '******', 'password': '******', 'hostname': '192.168.46.103:8091', 'name': 'onetothree'}

        params['create_replication'] = {'replicationType': 'continuous','toBucket': 'default','fromBucket': 'default','toCluster': 'twotoone','type': 'xmem'}
        params['replication_id'] = rest_remote01.start_replication('continuous','default','twotoone')



        masDict,tc_status = self.rbac._iterate_role_mapping('_replication_admin_write01',"Administrator","password",params)
        masDict,tc_status = self.rbac._iterate_role_mapping('_replication_admin_write02',"Administrator","password",params,self.servers[1])

Example #7
    def cluster_bucket_xdcr_read(self, username, password, host, port=8091, servers=None, cluster=None, httpCode=None, user_role=None):
        _cluster_bucket_xdcr_read = {
            "replication_settings":"settings/replications/<id>;GET"
        }

        rest = RestConnection(servers[0])
        rest.remove_all_replications()
        rest.remove_all_remote_clusters()

        remote_cluster_name = 'rbac_cluster'
        rest = RestConnection(servers[0])
        remote_server01 = servers[1]
        remote_server02 = servers[2]
        rest_remote01 = RestConnection(remote_server01)
        rest_remote01.delete_bucket()
        rest_remote01.create_bucket(bucket='default', ramQuotaMB=100)
        rest_remote02 = RestConnection(remote_server02)
        remote_id = rest.add_remote_cluster(remote_server01.ip,8091,'Administrator','password',remote_cluster_name)
        replication_id = rest.start_replication('continuous','default',remote_cluster_name)
        replication_id = replication_id.replace("/","%2F")

        bucket_xdcr_read = {"replication_settings":"settings/replications/" + replication_id + ";GET"}
        result = self._return_http_code(bucket_xdcr_read,username,password,host=host,port=port, httpCode=httpCode, user_role=user_role)

        rest.remove_all_replications()
        rest.remove_all_remote_clusters()
        rest_remote01.delete_bucket()
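The replace("/", "%2F") step above is hand-rolled percent-encoding so the replication id can be embedded in a REST path. A more general sketch using the standard library (an alternative shown for clarity, not what the suite itself does):

from urllib.parse import quote

replication_id = 'abc123/default/default'
encoded_id = quote(replication_id, safe='')  # 'abc123%2Fdefault%2Fdefault'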
Example #8
 def test_non_default_moxi(self):
     name = 'new-bucket-{0}'.format(uuid.uuid4())
     for serverInfo in self.servers:
         replicas = [0, 1, 2, 3]
         for replicaNumber in replicas:
             rest = RestConnection(serverInfo)
             proxyPort = rest.get_nodes_self().moxi + 2000
             rest.create_bucket(bucket=name,
                                ramQuotaMB=200,
                                replicaNumber=replicaNumber,
                                proxyPort=proxyPort)
             remote = RemoteMachineShellConnection(serverInfo)
             msg = 'create_bucket succeeded but bucket {0} does not exist'.format(
                 name)
             self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
                 name, rest),
                             msg=msg)
             rest.delete_bucket(name)
             msg = 'bucket "{0}" was not deleted even after waiting for 30 seconds'.format(
                 name)
             self.assertTrue(BucketOperationHelper.wait_for_bucket_deletion(
                 name, rest, timeout_in_seconds=30),
                             msg=msg)
             msg = 'bucket {0} data files are not deleted after bucket deleted from membase'.format(
                 name)
             self.assertTrue(self.wait_for_data_files_deletion(
                 name,
                 remote_connection=remote,
                 rest=rest,
                 timeout_in_seconds=20),
                             msg=msg)
             BucketOperationHelper.delete_bucket_or_assert(
                 serverInfo, name, self)
Example #9
 def _create_buckets(self, nodes):
     master_node = nodes[0]
     num_buckets = 0
     if self._default_bucket:
         num_buckets += 1
     num_buckets += self._sasl_buckets + self._standard_buckets
     if num_buckets == 0:
         return
     bucket_size = self._get_bucket_size(master_node, nodes,
                                         self._mem_quota_int, num_buckets)
     rest = RestConnection(master_node)
     master_id = rest.get_nodes_self().id
     if self._default_bucket:
         if self._default_quota != 0:
             bucket_size = self._default_quota
         rest = RestConnection(nodes[0])
         rest.create_bucket(bucket=self.default_bucket_name,
                            ramQuotaMB=bucket_size,
                            replicaNumber=self._num_replicas,
                            proxyPort=11211,
                            authType="none",
                            saslPassword=None)
         self._buckets.append(self.default_bucket_name)
     if self._sasl_buckets > 0:
         if self._sasl_quota != 0:
             bucket_size = self._sasl_quota
         self._create_sasl_buckets(master_node,
                                   master_id,
                                   bucket_size,
                                   password="******")
     if self._standard_buckets > 0:
         if self._standard_quota != 0:
             bucket_size = self._standard_quota
         self._create_standard_buckets(master_node, master_id, bucket_size)
Example #10
    def test_max_buckets(self):
        log = logger.Logger.get_logger()
        serverInfo = self.servers[0]
        log.info('picking server : {0} as the master'.format(serverInfo))
        rest = RestConnection(serverInfo)
        proxyPort = rest.get_nodes_self().moxi
        info = rest.get_nodes_self()
        rest.init_cluster(username=serverInfo.rest_username,
                          password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        bucket_ram = 100
        bucket_count = info.mcdMemoryReserved // bucket_ram

        for i in range(bucket_count):
            bucket_name = 'max_buckets-{0}'.format(uuid.uuid4())
            rest.create_bucket(bucket=bucket_name,
                               ramQuotaMB=bucket_ram,
                               authType='sasl',
                               proxyPort=proxyPort)
            ready = BucketOperationHelper.wait_for_memcached(
                serverInfo, bucket_name)
            self.assertTrue(ready, "wait_for_memcached failed")

        buckets = []
        try:
            buckets = rest.get_buckets()
        except Exception:
            log.info('15 seconds sleep before calling get_buckets again...')
            time.sleep(15)
            buckets = rest.get_buckets()
        if len(buckets) != bucket_count:
            msg = 'tried to create {0} buckets, only created {1}'.format(
                bucket_count, len(buckets))
            log.error(msg)
            self.fail(msg=msg)
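Note the floor division when computing bucket_count: under Python 3, / always returns a float and range() rejects floats. A minimal illustration:

mcd_memory_reserved, bucket_ram = 1024, 100
bucket_count = mcd_memory_reserved / bucket_ram    # 10.24 -> range(bucket_count) raises TypeError
bucket_count = mcd_memory_reserved // bucket_ram   # 10    -> works with range()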
Example #11
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.master = TestInputSingleton.input.servers[0]
        ClusterOperationHelper.cleanup_cluster([self.master])
        BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)

        self._bucket_name = 'default'

        serverInfo = self.master

        rest = RestConnection(serverInfo)
        info = rest.get_nodes_self()
        self._bucket_port = info.moxi
        rest.init_cluster(username=serverInfo.rest_username,
                          password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        bucket_ram = info.memoryQuota * 2 // 3

        # Add built-in user
        testuser = [{
            'id': 'cbadminbucket',
            'name': 'cbadminbucket',
            'password': '******'
        }]
        RbacBase().create_user_source(testuser, 'builtin', self.master)

        # Assign user to role
        role_list = [{
            'id': 'cbadminbucket',
            'name': 'cbadminbucket',
            'roles': 'admin'
        }]
        RbacBase().add_user_role(role_list, RestConnection(self.master),
                                 'builtin')

        rest.create_bucket(bucket=self._bucket_name,
                           ramQuotaMB=bucket_ram,
                           proxyPort=info.memcached)

        msg = 'create_bucket succeeded but bucket "default" does not exist'

        if (testconstants.TESTRUNNER_CLIENT in os.environ and
                os.environ[testconstants.TESTRUNNER_CLIENT] == testconstants.PYTHON_SDK):
            self.client = SDKSmartClient(
                serverInfo,
                self._bucket_name,
                compression=TestInputSingleton.input.param(
                    "sdk_compression", True))
        else:
            self.client = MemcachedClientHelper.direct_client(
                serverInfo, self._bucket_name)

        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
            self._bucket_name, rest),
                        msg=msg)
        ready = BucketOperationHelper.wait_for_memcached(
            serverInfo, self._bucket_name)
        self.assertTrue(ready, "wait_for_memcached failed")
        self._log_start()
Example #12
    def test_default_moxi(self):
        name = 'default'
        for serverInfo in self.servers:
            rest = RestConnection(serverInfo)
            replicaNumber = 1
            proxyPort = rest.get_nodes_self().moxi
            rest.create_bucket(bucket=name,
                               ramQuotaMB=200,
                               replicaNumber=replicaNumber,
                               proxyPort=proxyPort)
            msg = 'create_bucket succeeded but bucket {0} does not exist'.format(name)
            self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(name, rest), msg=msg)
            ready = BucketOperationHelper.wait_for_memcached(serverInfo, name)
            self.assertTrue(ready, "wait_for_memcached failed")
            inserted_keys = BucketOperationHelper.load_some_data(serverInfo, 1, name)
            self.assertTrue(inserted_keys, 'unable to insert any key to memcached')
            verified = BucketOperationHelper.verify_data(serverInfo, inserted_keys, True, False, self, bucket=name)
            self.assertTrue(verified, msg='failed to verify all the stored keys')
            #verify keys
            rest.delete_bucket(name)
            msg = 'bucket "{0}" was not deleted even after waiting for two minutes'.format(name)
            self.assertTrue(BucketOperationHelper.wait_for_bucket_deletion(name, rest, timeout_in_seconds=60), msg=msg)

            rest.create_bucket(bucket=name,
                               ramQuotaMB=200,
                               replicaNumber=replicaNumber,
                               proxyPort=proxyPort)
            msg = 'create_bucket succeeded but bucket {0} does not exist'.format(name)
            self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(name, rest), msg=msg)
            BucketOperationHelper.wait_for_memcached(serverInfo, name)
            #now let's recreate the bucket
            self.log.info('recreated the default bucket...')
            #loop over the keys make sure they dont exist
            self.assertTrue(BucketOperationHelper.keys_dont_exist(serverInfo, inserted_keys, name),
                            msg='at least one key found in the bucket')
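Helpers such as wait_for_bucket_creation and wait_for_bucket_deletion are timeout-bounded polling loops. A generic, self-contained sketch of that pattern (not the testrunner implementation):

import time

def wait_for(predicate, timeout_s=60, poll_s=2):
    # Poll predicate() until it returns a truthy value or the deadline passes.
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(poll_s)
    return False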
Example #14
    def test_root_crt_rotate_cluster(self):
        rest = RestConnection(self.master)
        x509main(self.master).setup_master()
        x509main().setup_cluster_nodes_ssl(self.servers)
        rest.create_bucket(bucket='default', ramQuotaMB=100)
        self.sleep(30)
        servers_in = self.servers[1:]
        self.cluster.rebalance(self.servers, servers_in, [])

        for server in self.servers:
            result  = self._sdk_connection(host_ip=server.ip)
            self.assertTrue(result,"Can create a ssl connection with correct certificate")

        result,cb   = self._sdk_connection(host_ip=self.master.ip)
        create_docs = Thread(name='create_docs', target=self.createBulkDocuments, args=(cb,))
        create_docs.start()

        x509main(self.master)._delete_inbox_folder()
        x509main(self.master)._generate_cert(self.servers, root_cn="CB\\ Authority")
        x509main(self.master).setup_master()
        x509main().setup_cluster_nodes_ssl(self.servers,reload_cert=True)


        create_docs.join()

        for server in self.servers:
            result  = self._sdk_connection(host_ip=server.ip)
            self.assertTrue(result, "Could not create an SSL connection with the correct certificate")
Example #15
 def _create_buckets(self, nodes):
     master_node = nodes[0]
     num_buckets = 0
     if self._default_bucket:
         num_buckets += 1
     num_buckets += self._sasl_buckets + self._standard_buckets
     bucket_size = self._get_bucket_size(master_node, nodes, self._mem_quota_int, num_buckets)
     rest = RestConnection(master_node)
     master_id = rest.get_nodes_self().id
     if self._default_bucket:
         if self._default_quota != 0:
             bucket_size = self._default_quota
         rest = RestConnection(nodes[0])
         rest.create_bucket(
             bucket=self.default_bucket_name,
             ramQuotaMB=bucket_size,
             replicaNumber=self._num_replicas,
             proxyPort=11211,
             authType="none",
             saslPassword=None,
         )
         self._buckets.append(self.default_bucket_name)
     if self._sasl_buckets > 0:
         if self._sasl_quota != 0:
             bucket_size = self._sasl_quota
         self._create_sasl_buckets(master_node, master_id, bucket_size, password="******")
     if self._standard_buckets > 0:
         if self._standard_quota != 0:
             bucket_size = self._standard_quota
         self._create_standard_buckets(master_node, master_id, bucket_size)
Example #16
    def _cluster_setup(self):
        replicas = self.input.param("replicas", 1)
        keys_count = self.input.param("keys-count", 0)
        num_buckets = self.input.param("num-buckets", 1)

        bucket_name = "default"
        master = self.servers[0]
        credentials = self.input.membase_settings
        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        rest.init_cluster(username=self.master.rest_username,
                          password=self.master.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        rest.reset_autofailover()
        ClusterOperationHelper.add_and_rebalance(self.servers, True)

        if num_buckets == 1:
            bucket_ram = info.memoryQuota * 2 // 3
            rest.create_bucket(bucket=bucket_name,
                               ramQuotaMB=bucket_ram,
                               replicaNumber=replicas,
                               proxyPort=info.moxi)
        else:
            created = BucketOperationHelper.create_multiple_buckets(self.master, replicas, howmany=num_buckets)
            self.assertTrue(created, "unable to create multiple buckets")

        buckets = rest.get_buckets()
        for bucket in buckets:
            ready = BucketOperationHelper.wait_for_memcached(self.master, bucket.name)
            self.assertTrue(ready, msg="wait_for_memcached failed")

        for bucket in buckets:
            inserted_keys_cnt = self.load_data(self.master, bucket.name, keys_count)
            log.info('inserted {0} keys'.format(inserted_keys_cnt))
Example #17
    def execute(self, task_manager):
        rest = RestConnection(self.server)
        if self.size <= 0:
            info = rest.get_nodes_self()
            self.size = info.memoryQuota * 2 // 3

        authType = 'none' if self.password is None else 'sasl'

        try:
            rest.create_bucket(bucket=self.bucket,
                               ramQuotaMB=self.size,
                               replicaNumber=self.replicas,
                               proxyPort=self.port,
                               authType=authType,
                               saslPassword=self.password)
            self.state = CHECKING
            task_manager.schedule(self)
        except BucketCreationException as e:
            self.state = FINISHED
            self.set_exception(e)
        #catch and set all unexpected exceptions
        except Exception as e:
            self.state = FINISHED
            self.log.info("Unexpected Exception Caught")
            self.set_exception(e)
Example #18
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.master = TestInputSingleton.input.servers[0]
        ClusterOperationHelper.cleanup_cluster([self.master])
        BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)

        self._bucket_name = 'default'

        serverInfo = self.master
        rest = RestConnection(serverInfo)
        info = rest.get_nodes_self()
        self._bucket_port = info.moxi
        rest.init_cluster(username=serverInfo.rest_username,
                          password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        bucket_ram = info.memoryQuota * 2 // 3
        rest.create_bucket(bucket=self._bucket_name,
                           ramQuotaMB=bucket_ram,
                           proxyPort=info.memcached)
        msg = 'create_bucket succeeded but bucket "default" does not exist'
        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
            self._bucket_name, rest),
                        msg=msg)
        ready = BucketOperationHelper.wait_for_memcached(
            serverInfo, self._bucket_name)
        self.assertTrue(ready, "wait_for_memcached failed")
        self._log_start()
Example #19
    def test_non_default_case_sensitive_same_port(self):
        postfix = uuid.uuid4()
        name = 'uppercase_{0}'.format(postfix)
        for serverInfo in self.servers:
            rest = RestConnection(serverInfo)
            proxyPort = rest.get_nodes_self().moxi + 100
            rest.create_bucket(bucket=name,
                               ramQuotaMB=200,
                               proxyPort=proxyPort)
            msg = 'create_bucket succeeded but bucket {0} does not exist'.format(
                name)
            self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
                name, rest),
                            msg=msg)

            self.log.info(
                "user should not be able to create a new bucket on an already used port"
            )
            name = 'UPPERCASE{0}'.format(postfix)
            try:
                rest.create_bucket(bucket=name,
                                   ramQuotaMB=200,
                                   proxyPort=proxyPort)
                self.fail(
                    'create-bucket did not throw exception while creating a new bucket on an already used port'
                )
            #make sure it raises bucketcreateexception
            except BucketCreationException as ex:
                self.log.error(ex)
Example #20
    def cluster_xdcr_remote_clusters_write(self, username, password, host, port=8091, servers=None, cluster=None, httpCode=None, user_role=None):

        rest = RestConnection(servers[0])
        rest.remove_all_replications()
        rest.remove_all_remote_clusters()

        _cluster_xdcr_remove_cluster_write = {
            "remoteClusters":"pools/default/remoteClusters;POST",
            "remote_cluster_id":"pools/default/remoteClusters/<id>;PUT",
            "delete_remote":"pools/default/remoteClusters/<id>;DELETE"
        }

        params = {'hostname': "{0}:{1}".format(servers[1].ip, servers[1].port),'username': '******','password': '******','name':'rbac_remote01'}
        add_node = {"remoteClusters":"pools/default/remoteClusters;POST;" + str(params)}
        result = self._return_http_code(add_node,username,password,host=host,port=port, httpCode=httpCode, user_role=user_role)

        rest.remove_all_replications()
        rest.remove_all_remote_clusters()

        remote_cluster_name = 'rbac_cluster'
        rest = RestConnection(servers[0])
        remote_server01 = servers[1]
        remote_server02 = servers[2]
        rest_remote01 = RestConnection(remote_server01)
        rest_remote01.delete_bucket()
        rest_remote01.create_bucket(bucket='default', ramQuotaMB=100)
        rest_remote02 = RestConnection(remote_server02)

        rest.remove_all_replications()
        rest.remove_all_remote_clusters()
        remote_id = rest.add_remote_cluster(remote_server01.ip,8091,'Administrator','password',remote_cluster_name)
        time.sleep(20)
        delete_remote = {"delete_remote":"pools/default/remoteClusters/" + str(remote_cluster_name) + ";DELETE"}
        result = self._return_http_code(delete_remote,username,password,host=host,port=port, httpCode=httpCode, user_role=user_role)
Example #21
    def cluster_xdcr_settings_write(self, username, password, host, port=8091, servers=None, cluster=None, httpCode=None, user_role=None):

        _cluster_xdcr_settings_read = {
            "replication_settings":"settings/replications;POST;{'httpConnections': 20}"
        }

        rest = RestConnection(servers[0])
        rest.remove_all_replications()
        rest.remove_all_remote_clusters()

        remote_cluster_name = 'rbac_cluster'
        rest = RestConnection(servers[0])
        remote_server01 = servers[1]
        remote_server02 = servers[2]
        rest_remote01 = RestConnection(remote_server01)
        rest_remote01.delete_bucket()
        rest_remote01.create_bucket(bucket='default', ramQuotaMB=100)
        rest_remote02 = RestConnection(remote_server02)
        rest_remote02.delete_bucket()
        remote_id = rest.add_remote_cluster(remote_server01.ip,8091,'Administrator','password',remote_cluster_name)
        time.sleep(20)
        replication_id = rest.start_replication('continuous','default',remote_cluster_name)

        result = self._return_http_code(_cluster_xdcr_settings_read,username,password,host=host,port=port, httpCode=httpCode, user_role=user_role)

        rest.remove_all_replications()
        rest.remove_all_remote_clusters()
        rest_remote01.delete_bucket()
Example #22
    def cluster_xdcr_remote_clusters_read(self, username, password, host, port=8091, servers=None, cluster=None, httpCode=None, user_role=None):
        remote_cluster_name = 'rbac_cluster'
        rest = RestConnection(servers[0])
        remote_server01 = servers[1]
        remote_server02 = servers[2]
        rest_remote01 = RestConnection(remote_server01)
        rest_remote01.delete_bucket()
        rest_remote01.create_bucket(bucket='default', ramQuotaMB=100)
        rest_remote02 = RestConnection(remote_server02)

        #------ First Test the Get Requests for XDCR --------------#

        #Remove all remote cluster references
        rest.remove_all_replications()
        rest.remove_all_remote_clusters()

        #Add remote cluster reference and replications
        rest.add_remote_cluster(remote_server01.ip,8091,'Administrator','password',remote_cluster_name)
        time.sleep(20)
        replication_id = rest.start_replication('continuous','default',remote_cluster_name)

        _cluster_xdcr_remote_clusters_read = {
            "remote_clusters_read": "/pools/default/remoteClusters;GET",
        }

        result = self._return_http_code(_cluster_xdcr_remote_clusters_read,username,password,host=host,port=8091, httpCode=httpCode, user_role=user_role)
Example #23
    def create_bucket(serverInfo,
                      name='default',
                      replica=1,
                      port=11210,
                      test_case=None,
                      bucket_ram=-1,
                      password=None):
        log = logger.Logger.get_logger()
        rest = RestConnection(serverInfo)
        if bucket_ram < 0:
            info = rest.get_nodes_self()
            bucket_ram = info.memoryQuota * 2 // 3

        # A non-None password implies SASL auth; no password means none
        if password is None:
            authType = "none"
        else:
            authType = "sasl"

        rest.create_bucket(bucket=name,
                           ramQuotaMB=bucket_ram,
                           replicaNumber=replica,
                           proxyPort=port,
                           authType=authType,
                           saslPassword=password)
        msg = 'create_bucket succeeded but bucket "{0}" does not exist'
        bucket_created = BucketOperationHelper.wait_for_bucket_creation(
            name, rest)
        if not bucket_created:
            log.error(msg.format(name))
            if test_case:
                test_case.fail(msg=msg.format(name))
        return bucket_created
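A hypothetical call site for the helper above, assuming it is exposed as a BucketOperationHelper staticmethod (names and values here are illustrative, not from the suite):

# Create a 256 MB bucket with one replica; a non-None password selects SASL auth
created = BucketOperationHelper.create_bucket(servers[0], name='example-bucket',
                                              replica=1, bucket_ram=256,
                                              password='secret')
assert created, 'bucket was not created in time'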
Example #25
    def common_setup(self, replica):
        self._input = TestInputSingleton.input
        self._servers = self._input.servers
        first = self._servers[0]
        self.log = logger.Logger().get_logger()
        self.log.info(self._input)
        rest = RestConnection(first)
        for server in self._servers:
            RestHelper(RestConnection(server)).is_ns_server_running()

        ClusterOperationHelper.cleanup_cluster(self._servers)
        BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
        ClusterOperationHelper.add_all_nodes_or_assert(self._servers[0], self._servers, self._input.membase_settings, self)
        nodes = rest.node_statuses()
        otpNodeIds = []
        for node in nodes:
            otpNodeIds.append(node.id)
        info = rest.get_nodes_self()
        bucket_ram = info.mcdMemoryReserved * 3 // 4
        rest.create_bucket(bucket="default",
                           ramQuotaMB=int(bucket_ram),
                           replicaNumber=replica,
                           proxyPort=rest.get_nodes_self().moxi)
        msg = "wait_for_memcached fails"
        ready = BucketOperationHelper.wait_for_memcached(first, "default"),
        self.assertTrue(ready, msg)
        rebalanceStarted = rest.rebalance(otpNodeIds, [])
        self.assertTrue(rebalanceStarted,
                        "unable to start rebalance on master node {0}".format(first.ip))
        self.log.info('started rebalance operation on master node {0}'.format(first.ip))
        rebalanceSucceeded = rest.monitorRebalance()
        # without a bucket this seems to fail
        self.assertTrue(rebalanceSucceeded,
                        "rebalance operation for nodes: {0} was not successful".format(otpNodeIds))
        self.awareness = VBucketAwareMemcached(rest, "default")
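The trailing comma removed from the wait_for_memcached line above is worth a note: it turned the result into a one-element tuple, and non-empty tuples are always truthy, so the assertion could never fail. A minimal illustration:

ready = False,               # trailing comma: ready is the tuple (False,)
assert bool(ready) is True   # non-empty tuples are truthy, so this always passes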
Example #26
 def test_sdk(self):
     rest = RestConnection(self.master)
     x509main(self.master).setup_master()
     rest.create_bucket(bucket='default', ramQuotaMB=100)
     result = self._sdk_connection(host_ip=self.master.ip)
     self.assertTrue(result,
                     "Cannot create a security connection with server")
Example #27
    def test_less_than_minimum_memory_quota(self):
        postfix = uuid.uuid4()
        name = 'minmemquota_{0}'.format(postfix)
        for serverInfo in self.servers:
            rest = RestConnection(serverInfo)
            proxyPort = rest.get_nodes_self().moxi
            try:
                rest.create_bucket(bucket=name,
                                   ramQuotaMB=99,
                                   authType='sasl',
                                   proxyPort=proxyPort)
                self.fail(
                    'create-bucket did not throw exception while creating a new bucket with 99 MB quota'
                )
            #make sure it raises bucketcreateexception
            except BucketCreationException as ex:
                self.log.error(ex)

            try:
                rest.create_bucket(bucket=name,
                                   ramQuotaMB=0,
                                   authType='sasl',
                                   proxyPort=proxyPort)

                self.fail(
                    'create-bucket did not throw exception while creating a new bucket with 0 MB quota'
                )
            #make sure it raises bucketcreateexception
            except BucketCreationException as ex:
                self.log.info(ex)
Example #28
    def test_add_remove_autofailover(self):
        rest = RestConnection(self.master)
        serv_out = self.servers[3]
        shell = RemoteMachineShellConnection(serv_out)
        known_nodes = ['ns_1@'+self.master.ip]

        rest.create_bucket(bucket='default', ramQuotaMB=100)
        rest.update_autofailover_settings(True,30)

        x509main(self.master).setup_master()
        x509main().setup_cluster_nodes_ssl(self.servers[1:4])
        for server in self.servers[1:4]:
            rest.add_node('Administrator','password',server.ip)
            known_nodes.append('ns_1@'+server.ip)

        rest.rebalance(known_nodes)
        self.assertTrue(self.check_rebalance_complete(rest),"Issue with rebalance")

        shell.stop_server()
        self.sleep(60)
        shell.start_server()
        self.sleep(30)
        for server in self.servers:
            status = x509main(server)._validate_ssl_login()
            self.assertEqual(status,200,"Not able to login via SSL code")
Example #29
    def test_add_remove_graceful_add_back_node_with_cert(self,recovery_type=None):
        recovery_type = self.input.param('recovery_type')
        rest = RestConnection(self.master)
        known_nodes = ['ns_1@'+self.master.ip]
        progress = None
        count = 0
        servs_inout = self.servers[1:]
        serv_out = 'ns_1@' + servs_inout[1].ip

        rest.create_bucket(bucket='default', ramQuotaMB=100)

        x509main(self.master).setup_master()
        x509main().setup_cluster_nodes_ssl(servs_inout)
        for server in servs_inout:
            rest.add_node('Administrator','password',server.ip)
            known_nodes.append('ns_1@' + server.ip)

        rest.rebalance(known_nodes)
        self.assertTrue(self.check_rebalance_complete(rest),"Issue with rebalance")

        for server in servs_inout:
            status = x509main(server)._validate_ssl_login()
            self.assertEqual(status,200,"Not able to login via SSL code")

        rest.fail_over(serv_out,graceful=True)
        self.assertTrue(self.check_rebalance_complete(rest),"Issue with rebalance")
        rest.set_recovery_type(serv_out,recovery_type)
        rest.add_back_node(serv_out)
        rest.rebalance(known_nodes)
        self.assertTrue(self.check_rebalance_complete(rest),"Issue with rebalance")

        for server in servs_inout:
            status = x509main(server)._validate_ssl_login()
            self.assertEqual(status,200,"Not able to login via SSL code")
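The graceful-failover and add-back sequence above follows a fixed REST order; a condensed sketch of just that order (method names as used in the suite, node id and recovery type illustrative):

otp_node = 'ns_1@' + servs_inout[1].ip
rest.fail_over(otp_node, graceful=True)     # 1. graceful failover
rest.set_recovery_type(otp_node, 'delta')   # 2. pick delta or full recovery
rest.add_back_node(otp_node)                # 3. mark the node for add-back
rest.rebalance(known_nodes)                 # 4. rebalance to complete recovery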
Example #30
    def create_bucket(self, task_manager):
        rest = RestConnection(self.server)
        if self.size <= 0:
            info = rest.get_nodes_self()
            self.size = info.memoryQuota * 2 // 3

        authType = 'none' if self.password is None else 'sasl'

        try:
            rest.create_bucket(bucket=self.bucket,
                               ramQuotaMB=self.size,
                               replicaNumber=self.replicas,
                               proxyPort=self.port,
                               authType=authType,
                               saslPassword=self.password)
            self.state = "checking"
            task_manager.schedule(self)
        except BucketCreationException:
            self.state = "finished"
            self.set_result({
                "status": "error",
                "value": "Failed to create bucket {0}".format(self.bucket)
            })
Example #32
 def setUp(self):
     super(RbacTestMemcached, self).setUp()
     rest = RestConnection(self.master)
     self.auth_type = self.input.param('auth_type','builtin')
     self.user_id = self.input.param("user_id",None)
     self.user_role = self.input.param("user_role",None)
     self.bucket_name = self.input.param("bucket_name",None)
     rest.create_bucket(bucket=self.bucket_name, ramQuotaMB=100,lww=True)
     self.role_map = self.input.param("role_map",None)
     self.incorrect_bucket = self.input.param("incorrect_bucket",False)
     self.new_role = self.input.param("new_role",None)
     self.new_role_map = self.input.param("new_role_map",None)
     self.no_bucket_access = self.input.param("no_bucket_access",False)
     self.no_access_bucket_name = self.input.param("no_access_bucket_name","noaccess")
     self.all_buckets = self.input.param("all_buckets",None)
     self.ldap_users = rbacmain().returnUserList(self.user_id)
     if self.no_bucket_access:
         rest.create_bucket(bucket=self.no_access_bucket_name, ramQuotaMB=100, lww=True)
      if self.auth_type == 'ldap':
          rbacmain(self.master, 'builtin')._delete_user('cbadminbucket')
          rbacmain().setup_auth_mechanism(self.servers, 'ldap', rest)
         for user in self.ldap_users:
             testuser = [{'id': user[0], 'name': user[0], 'password': user[1]}]
             RbacBase().create_user_source(testuser, 'ldap', self.master)
             self.sleep(10)
     elif self.auth_type == "pam":
         rbacmain().setup_auth_mechanism(self.servers,'pam', rest)
         rbacmain().add_remove_local_user(self.servers, self.ldap_users, 'deluser')
         rbacmain().add_remove_local_user(self.servers, self.ldap_users,'adduser')
     elif self.auth_type == "builtin":
         for user in self.ldap_users:
             testuser = [{'id': user[0], 'name': user[0], 'password': user[1]}]
             RbacBase().create_user_source(testuser, 'builtin', self.master)
             self.sleep(10)
Example #35
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.master = TestInputSingleton.input.servers[0]
        ClusterOperationHelper.cleanup_cluster([self.master])
        BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)

        self._bucket_name = 'default'

        serverInfo = self.master
        rest = RestConnection(serverInfo)
        info = rest.get_nodes_self()
        self._bucket_port = info.moxi
        rest.init_cluster(username=serverInfo.rest_username,
                          password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        bucket_ram = info.memoryQuota * 2 // 3

        # Add built-in user
        testuser = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'password': '******'}]
        RbacBase().create_user_source(testuser, 'builtin', self.master)
        time.sleep(10)

        # Assign user to role
        role_list = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'roles': 'admin'}]
        RbacBase().add_user_role(role_list, RestConnection(self.master), 'builtin')
        time.sleep(10)

        rest.create_bucket(bucket=self._bucket_name,
                           ramQuotaMB=bucket_ram,
                           proxyPort=info.memcached)
        msg = 'create_bucket succeeded but bucket "default" does not exist'
        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(self._bucket_name, rest), msg=msg)
        ready = BucketOperationHelper.wait_for_memcached(serverInfo, self._bucket_name)
        self.assertTrue(ready, "wait_for_memcached failed")
        self._log_start()
Example #36
 def test_default_moxi_sasl(self):
     name = 'new-bucket-{0}'.format(uuid.uuid4())
     for serverInfo in self.servers:
         rest = RestConnection(serverInfo)
         replicaNumber = 1
         proxyPort = rest.get_nodes_self().moxi
         rest.create_bucket(bucket=name,
                            ramQuotaMB=200,
                            replicaNumber=replicaNumber,
                            proxyPort=proxyPort,
                            authType="sasl",
                            saslPassword='******')
         msg = 'create_bucket succeeded but bucket {0} does not exist'.format(
             name)
         self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
             name, rest),
                         msg=msg)
         ready = BucketOperationHelper.wait_for_memcached(serverInfo, name)
         self.assertTrue(ready, "wait_for_memcached failed")
         rest.delete_bucket(name)
         msg = 'bucket "{0}" was not deleted even after waiting for two minutes'.format(
             name)
         self.assertTrue(BucketOperationHelper.wait_for_bucket_deletion(
             name, rest, timeout_in_seconds=30),
                         msg=msg)
Example #37
    def test_default_case_sensitive_dedicated(self):
        name = 'Default'
        for serverInfo in self.servers:
            rest = RestConnection(serverInfo)
            proxyPort = rest.get_nodes_self().moxi
            rest.create_bucket(bucket=name,
                               ramQuotaMB=200,
                               authType='sasl',
                               saslPassword='******',
                               proxyPort=proxyPort)
            msg = 'create_bucket succeeded but bucket {0} does not exist'.format(
                name)
            self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
                name, rest),
                            msg=msg)

            name = 'default'

            try:
                rest.create_bucket(bucket=name,
                                   ramQuotaMB=200,
                                   proxyPort=11221,
                                   authType='sasl',
                                   saslPassword='******')
                msg = "create_bucket created two buckets in different case : {0},{1}".format(
                    'default', 'Default')
                self.fail(msg)
            except BucketCreationException as ex:
                #check if 'default' and 'Default' buckets exist
                self.log.info('BucketCreationException was thrown as expected')
                self.log.info(ex.message)
Example #39
    def test_add_remove_graceful_add_back_node_with_cert(self, recovery_type=None):
        recovery_type = self.input.param('recovery_type')
        rest = RestConnection(self.master)
        known_nodes = ['ns_1@' + self.master.ip]
        servs_inout = self.servers[1:]
        serv_out = 'ns_1@' + servs_inout[1].ip

        rest.create_bucket(bucket='default', ramQuotaMB=100)

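        # provision x509 certificates on the master and on each incoming node before adding them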
        x509main(self.master).setup_master()
        x509main().setup_cluster_nodes_ssl(servs_inout)
        for server in servs_inout:
            rest.add_node('Administrator', 'password', server.ip)
            known_nodes.append('ns_1@' + server.ip)

        rest.rebalance(known_nodes)
        self.assertTrue(self.check_rebalance_complete(rest), "Issue with rebalance")

        for server in servs_inout:
            status = x509main(server)._validate_ssl_login()
            self.assertEqual(status, 200, "Unable to log in via SSL")

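        # gracefully fail over one node, then add it back with the requested recovery type ('delta' or 'full')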
        rest.fail_over(serv_out, graceful=True)
        self.assertTrue(self.check_rebalance_complete(rest), "Issue with rebalance")
        rest.set_recovery_type(serv_out, recovery_type)
        rest.add_back_node(serv_out)
        rest.rebalance(known_nodes)
        self.assertTrue(self.check_rebalance_complete(rest), "Issue with rebalance")

        for server in servs_inout:
            status = x509main(server)._validate_ssl_login()
            self.assertEqual(status, 200, "Unable to log in via SSL")
Example #40
 def test_valid_length(self):
     max_len = 100
     name_len = self.input.param('name_length', 100)
     name = 'a' * name_len
     master = self.servers[0]
     rest = RestConnection(master)
     proxyPort = rest.get_nodes_self().moxi
     try:
         rest.create_bucket(bucket=name,
                            ramQuotaMB=200,
                            authType='sasl',
                            proxyPort=proxyPort)
         if name_len <= max_len:
             msg = 'failed to start up bucket with valid length'
             self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
                 name, rest),
                             msg=msg)
         else:
             self.fail('Bucket with invalid length created')
     except BucketCreationException as ex:
         self.log.error(ex)
         if name_len <= max_len:
             self.fail('could not create bucket with valid length')
         else:
             self.log.info(
                 'bucket with invalid length not created as expected')
Example #41
    def test_non_default_case_sensitive_different_port(self):
        postfix = uuid.uuid4()
        lowercase_name = 'uppercase_{0}'.format(postfix)
        for serverInfo in self.servers:
            rest = RestConnection(serverInfo)
            proxyPort = rest.get_nodes_self().moxi + 500
            rest.create_bucket(bucket=lowercase_name,
                               ramQuotaMB=200,
                               proxyPort=proxyPort)
            msg = 'create_bucket succeeded but bucket {0} does not exist'.format(
                lowercase_name)
            self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
                lowercase_name, rest),
                            msg=msg)

            uppercase_name = 'UPPERCASE_{0}'.format(postfix)
            try:
                rest.create_bucket(bucket=uppercase_name,
                                   ramQuotaMB=200,
                                   proxyPort=proxyPort + 1000)
                msg = "create_bucket created two buckets in different case : {0},{1}".format(
                    lowercase_name, uppercase_name)
                self.fail(msg)
            except BucketCreationException as ex:
                #check that two buckets differing only in case were not both created
                self.log.info('BucketCreationException was thrown as expected')
                self.log.info(ex.message)
Example #42
    def set_get_test(self, value_size, number_of_items):
        fixed_value = MemcachedClientHelper.create_value("S", value_size)
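        # (bucket name, replica count) pairs to create and exercise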
        specs = [
            ("default", 0),
            ("set-get-bucket-replica-1", 1),
            ("set-get-bucket-replica-2", 2),
            ("set-get-bucket-replica-3", 3),
        ]
        serverInfo = self.master
        rest = RestConnection(serverInfo)
        bucket_ram = int(rest.get_nodes_self().memoryQuota / 4)

        mcport = rest.get_nodes_self().memcached
        for name, replica in specs:
            rest.create_bucket(name, bucket_ram, "sasl", "password", replica, mcport)

        bucket_data = {}
        buckets = RestConnection(serverInfo).get_buckets()
        for bucket in buckets:
            bucket_data[bucket.name] = {}
            ready = BucketOperationHelper.wait_for_memcached(serverInfo, bucket.name)
            self.test.assertTrue(ready, "wait_for_memcached failed")

            client = MemcachedClientHelper.direct_client(serverInfo, bucket.name)
            inserted = []
            rejected = []
            while len(inserted) <= number_of_items and len(rejected) <= number_of_items:
                try:
                    key = str(uuid.uuid4())
                    client.set(key, 0, 0, fixed_value)
                    inserted.append(key)
                except mc_bin_client.MemcachedError:
                    # track rejected sets so the loop cannot spin forever on persistent errors
                    rejected.append(key)

            retry = 0
            remaining_items = list(inserted)
            msg = "memcachedError : {0} - unable to get a pre-inserted key : {1}"
            # retry full verification passes (up to 10) until every key is read back
            while retry < 10 and len(remaining_items) > 0:
                verified_keys = []
                for key in remaining_items:
                    try:
                        flag, keyx, value = client.get(key=key)
                        if not value == fixed_value:
                            self.test.fail("value mismatch for key {0}".format(key))
                        verified_keys.append(key)
                    except mc_bin_client.MemcachedError as error:
                        self.log.error(msg.format(error.status, key))
                for key in verified_keys:
                    remaining_items.remove(key)
                retry += 1

            print_count = 0
            for key in remaining_items:
                if print_count > 100:
                    break
                print_count += 1
                self.log.error("unable to verify key : {0}".format(key))
            if remaining_items:
                self.test.fail("unable to verify {0} keys".format(len(remaining_items)))
Example #43
 def test_default_moxi(self):
     name = 'default'
     for serverInfo in self.servers:
         rest = RestConnection(serverInfo)
         proxyPort = rest.get_nodes_self().moxi + 1000
         rest.create_bucket(bucket=name, ramQuotaMB=200, proxyPort=proxyPort)
         msg = 'create_bucket succeeded but bucket {0} does not exist'.format(name)
         self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(name, rest), msg=msg)
Example #45
    def _cluster_setup(self):
        keys_count = self.input.param("keys-count", 0)
        num_buckets = self.input.param("num-buckets", 1)
        bucketType = self.input.param("bucketType", "ephemeral")
        evictionPolicy = self.input.param("evictionPolicy",
                                          "noEviction")  # fullEviction
        self.bucket_storage = self.input.param("bucket_storage", 'couchstore')

        # master = self.servers[0]
        # credentials = self.input.membase_settings
        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        rest.init_cluster(username=self.master.rest_username,
                          password=self.master.rest_password)
        memory = min(info.mcdMemoryReserved,
                     self.input.param("kv_memory", 1000))
        rest.init_cluster_memoryQuota(memoryQuota=memory)
        rest.reset_autoreprovision()
        self._add_and_rebalance(self.servers, True)

        if num_buckets == 1:
            bucket_name = "default"
            bucket_ram = info.memoryQuota * 2 // 3
            rest.create_bucket(bucket=bucket_name,
                               ramQuotaMB=bucket_ram,
                               replicaNumber=self.replicas,
                               proxyPort=info.moxi,
                               bucketType=bucketType,
                               evictionPolicy=evictionPolicy,
                               storageBackend=self.bucket_storage)
        else:
            created = BucketOperationHelper.create_multiple_buckets(
                self.master,
                self.replicas,
                howmany=num_buckets,
                bucketType=bucketType,
                evictionPolicy=evictionPolicy,
                storageBackend=self.bucket_storage)
            self.assertTrue(created, "unable to create multiple buckets")

        buckets = rest.get_buckets()
        for bucket in buckets:
            ready = BucketOperationHelper.wait_for_memcached(
                self.master, bucket.name)
            self.assertTrue(ready, msg="wait_for_memcached failed")

        for bucket in buckets:
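            # value-size distribution: value size in bytes -> fraction of generated items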
            distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
            inserted_count, rejected_count = self.load_bucket_and_return_the_keys(
                servers=[self.master],
                name=bucket.name,
                # ram_load_ratio=0.02,
                value_size_distribution=distribution,
                write_only=True,
                moxi=True,
                number_of_threads=2,
                number_of_items=keys_count)
            self.loaded_items[bucket.name] = inserted_count
Example #46
    def common_test_body(self, replica, load_ratio, timeout=10):
        log = logger.Logger.get_logger()
        start_time = time.time()
        log.info("replica : {0}".format(replica))
        log.info("load_ratio : {0}".format(load_ratio))
        master = self._servers[0]
        log.info('picking server : {0} as the master'.format(master))
        rest = RestConnection(master)
        info = rest.get_nodes_self()
        rest.init_cluster(username=master.rest_username,
                          password=master.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        bucket_ram = info.mcdMemoryReserved * 2 // 3
        rest.create_bucket(bucket='default',
                           ramQuotaMB=bucket_ram,
                           replicaNumber=replica,
                           proxyPort=11211)
        json_bucket = {'name': 'default', 'port': 11211, 'password': ''}
        BucketOperationHelper.wait_for_memcached(master, json_bucket)
        log.info("inserting some items in the master before adding any nodes")
        distribution = {1024: 0.4, 2 * 1024: 0.5, 512: 0.1}
        threads = MemcachedClientHelper.create_threads(
            servers=[master],
            value_size_distribution=distribution,
            number_of_threads=len(self._servers),
            number_of_items=400000000,
            moxi=False,
            write_only=True,
            async_write=True)
        for thread in threads:
            thread.terminate_in_minutes = 24 * 60
            thread.start()
        while time.time() < (start_time + 60 * timeout):
            #randomly grow the cluster while some nodes are still out,
            #and occasionally rebalance a few out again, while the writers keep loading
            nodes = rest.node_statuses()
            delta = len(self._servers) - len(nodes)
            if delta > 0:
                if delta > 1:
                    how_many_add = Random().randint(1, delta)
                else:
                    how_many_add = 1
                self.log.info("going to add {0} nodes".format(how_many_add))
                self.rebalance_in(how_many=how_many_add)
            else:
                self.log.info("all nodes already joined the clustr")
            time.sleep(240)
            RestHelper(rest).wait_for_replication(600)
            #only rebalance out once most of the nodes have joined
            if len(nodes) >= (3.0 / 4.0 * len(self._servers)):
                nodes = rest.node_statuses()
                how_many_out = Random().randint(1, len(nodes) - 1)
                self.log.info("going to remove {0} nodes".format(how_many_out))
                self.rebalance_out(how_many=how_many_out)

        for t in threads:
            t.aborted = True
            t.join()
Example #47
 def backup_restore(self):
     try:
         backup_start = self.backups[int(self.backupset.start) - 1]
     except IndexError:
         backup_start = "{0}{1}".format(self.backups[-1], self.backupset.start)
     try:
         backup_end = self.backups[int(self.backupset.end) - 1]
     except IndexError:
         backup_end = "{0}{1}".format(self.backups[-1], self.backupset.end)
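     # assemble the cbbackupmgr restore invocation from the backupset options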
     args = "restore --archive {0} --repo {1} --host http://{2}:{3} --username {4} --password {5} --start {6} " \
            "--end {7}".format(self.backupset.directory, self.backupset.name,
                                               self.backupset.restore_cluster_host.ip,
                                               self.backupset.restore_cluster_host.port,
                                               self.backupset.restore_cluster_host_username,
                                               self.backupset.restore_cluster_host_password, backup_start,
                                               backup_end)
     if self.backupset.exclude_buckets:
         args += " --exclude-buckets {0}".format(self.backupset.exclude_buckets)
     if self.backupset.include_buckets:
         args += " --include-buckets {0}".format(self.backupset.include_buckets)
     if self.backupset.disable_bucket_config:
         args += " --disable-bucket-config {0}".format(self.backupset.disable_bucket_config)
     if self.backupset.disable_views:
         args += " --disable-views {0}".format(self.backupset.disable_views)
     if self.backupset.disable_gsi_indexes:
         args += " --disable-gsi-indexes {0}".format(self.backupset.disable_gsi_indexes)
     if self.backupset.disable_ft_indexes:
         args += " --disable-ft-indexes {0}".format(self.backupset.disable_ft_indexes)
     if self.backupset.disable_data:
         args += " --disable-data {0}".format(self.backupset.disable_data)
     if self.backupset.filter_keys:
         args += " --filter-keys {0}".format(self.backupset.filter_keys)
     if self.backupset.filter_values:
         args += " --filter-values {0}".format(self.backupset.filter_values)
     if self.backupset.force_updates:
         args += " --force-updates"
     if self.no_progress_bar:
         args += " --no-progress-bar"
     if not self.skip_buckets:
         rest_conn = RestConnection(self.backupset.restore_cluster_host)
         rest_helper = RestHelper(rest_conn)
         for bucket in self.buckets:
             if not rest_helper.bucket_exists(bucket.name):
                 self.log.info("Creating bucket {0} in restore host {1}".format(bucket.name,
                                                                                self.backupset.restore_cluster_host.ip))
                 rest_conn.create_bucket(bucket=bucket.name,
                                         ramQuotaMB=512,
                                         authType=bucket.authType if bucket.authType else 'none',
                                         proxyPort=bucket.port,
                                         saslPassword=bucket.saslPassword)
                 bucket_ready = rest_helper.vbucket_map_ready(bucket.name)
                 if not bucket_ready:
                     self.fail("Bucket %s not created after 120 seconds." % bucket.name)
     remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
     command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, args)
     output, error = remote_client.execute_command(command)
     remote_client.log_command_output(output, error)
     return output, error
Example #48
    def _test_backup_add_restore_bucket_with_expiration_key(self, replica):
        bucket = "default"
        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        size = int(info.memoryQuota * 2.0 / 3.0)
        rest.create_bucket(bucket,
                           ramQuotaMB=size,
                           proxyPort=info.moxi,
                           replicaNumber=replica)
        BucketOperationHelper.wait_for_memcached(self.master, bucket)
        client = MemcachedClientHelper.direct_client(self.master, bucket)
        expiry = 60
        test_uuid = uuid.uuid4()
        keys = ["key_%s_%d" % (test_uuid, i) for i in range(5000)]
        self.log.info("pushing keys with expiry set to {0}".format(expiry))
        for key in keys:
            try:
                client.set(key, expiry, 0, key)
            except mc_bin_client.MemcachedError as error:
                msg = "unable to push key : {0} to bucket : {1} error : {2}"
                self.log.error(msg.format(key, client.vbucketId, error.status))
                self.fail(msg.format(key, client.vbucketId, error.status))
        client.close()
        self.log.info("inserted {0} keys with expiry set to {1}".format(
            len(keys), expiry))
        ready = RebalanceHelper.wait_for_persistence(
            self.master, bucket, bucket_type=self.bucket_type)
        self.assertTrue(ready, "not all items persisted. see logs")
        node = RestConnection(self.master).get_nodes_self()

        output, error = self.shell.execute_command(self.perm_command)
        self.shell.log_command_output(output, error)
        backupHelper = BackupHelper(self.master, self)
        backupHelper.backup(bucket, node, self.remote_tmp_folder)

        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket,
                                                      self)
        rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi)
        BucketOperationHelper.wait_for_memcached(self.master, bucket)
        backupHelper.restore(self.remote_tmp_folder)
        time.sleep(60)
        client = MemcachedClientHelper.direct_client(self.master, bucket)
        self.log.info('verifying that all those keys have expired...')
        for key in keys:
            try:
                client.get(key=key)
                msg = "expiry was set to {0} but key: {1} did not expire after waiting for {2}+ seconds"
                self.fail(msg.format(expiry, key, expiry))
            except mc_bin_client.MemcachedError as error:
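                # status 1 is memcached KEY_ENOENT, i.e. the key has expired as expected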
                self.assertEqual(
                    error.status,
                    1,
                    msg="expected error code {0} but saw error code {1}".format(
                        1, error.status))
        client.close()
        self.log.info(
            "verified that those keys inserted with expiry set to {0} have expired"
            .format(expiry))
Example #49
class QueryWorkbenchTests(BaseTestCase):
    n1ql_port = 8093
    _input = TestInputSingleton.input
    num_items = _input.param("items", 100)
    _value_size = _input.param("value_size", 256)
    gen_create = BlobGenerator('loadOne', 'loadOne', _value_size, end=num_items)
    #bucket and ram quota
    buckets_ram = {
        "CUSTOMER": 100,
        "DISTRICT": 100,
        "HISTORY": 100,
        "ITEM": 100,
        "NEW_ORDER": 100,
        "ORDERS": 100,
        "ORDER_LINE": 100,
        #"default:": 100
        }

    def setUp(self):
        super(QueryWorkbenchTests, self).setUp()
        server = self.master
        if self.input.tuq_client and "client" in self.input.tuq_client:
            server = self.tuq_client
        self.rest = RestConnection(server)
        #self.rest.delete_bucket("default")
        time.sleep(20)
        # drop and recreate buckets
        for i, bucket_name in enumerate(self.buckets_ram.keys()):
            self.rest.create_bucket(bucket=bucket_name,
                                    ramQuotaMB=int(self.buckets_ram[bucket_name]),
                                    replicaNumber=0,
                                    proxyPort=11218 + i)
            print self.servers[0]
            #bucket = self.src_cluster.get_bucket_by_name(bucket_name)
        time.sleep(20)
        #self.rest.create_bucket(bucket="default",
                                   #ramQuotaMB=int(self.buckets_ram["default"]),
                                   #replicaNumber=0,
                                   #proxyPort=11218)
        self._load_all_buckets(self.servers[0], self.gen_create, "create", 0)
        #time.sleep(20)

    def tearDown(self):
        super(QueryWorkbenchTests, self).tearDown()

    def test_describe(self):
        for bucket_name in self.rest.get_buckets():
            query = "infer %s" % bucket_name
            print query
            result = self.rest.query_tool(query, self.n1ql_port)
            print result
Example #51
class QueryWorkbenchTests(BaseTestCase):
    n1ql_port = 8093
    _input = TestInputSingleton.input
    num_items = _input.param("items", 100)
    _value_size = _input.param("value_size", 256)
    gen_create = BlobGenerator('loadOne',
                               'loadOne',
                               _value_size,
                               end=num_items)
    #bucket and ram quota
    buckets_ram = {
        "CUSTOMER": 100,
        "DISTRICT": 100,
        "HISTORY": 100,
        "ITEM": 100,
        "NEW_ORDER": 100,
        "ORDERS": 100,
        "ORDER_LINE": 100
    }

    #"default:": 100}

    def setUp(self):
        super(QueryWorkbenchTests, self).setUp()
        server = self.master
        if self.input.tuq_client and "client" in self.input.tuq_client:
            server = self.tuq_client
        self.rest = RestConnection(server)
        #self.rest.delete_bucket("default")
        time.sleep(20)
        # drop and recreate buckets
        for i, bucket_name in enumerate(self.buckets_ram.keys()):
            self.rest.create_bucket(bucket=bucket_name,
                                    ramQuotaMB=int(
                                        self.buckets_ram[bucket_name]),
                                    replicaNumber=0,
                                    proxyPort=11218 + i)
            self.log.info(self.servers[0])
            #bucket = self.src_cluster.get_bucket_by_name(bucket_name)
        time.sleep(20)
        #self.rest.create_bucket(bucket="default",
        #ramQuotaMB=int(self.buckets_ram["default"]),
        #replicaNumber=0,
        #proxyPort=11218)
        self._load_all_buckets(self.servers[0], self.gen_create,
                               "create", 0)
        #time.sleep(20)

    def tearDown(self):
        super(QueryWorkbenchTests, self).tearDown()

    def test_describe(self):
        for bucket_name in self.rest.get_buckets():
            query = "infer %s" % bucket_name
            self.log.info(query)
            result = self.rest.query_tool(query, self.n1ql_port)
            self.log.info(result)
Example #53
 def test_perm_rest_api(self,permission,user,password,user_role):
     func_name, http_code = self.get_role_permission(permission)
     rest = RestConnection(self.master_ip)
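     # build the permission-check call as a string and eval() it against rbacPermissionList()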
     try:
         rest.create_bucket(bucket='default', ramQuotaMB=100)
     except Exception:
         log.info("Default bucket already exists")
     final_func = ("rbacPermissionList().{0}('{1}', '{2}', host=self.master_ip, "
                   "servers=self.servers, cluster=self.cluster, httpCode={3}, "
                   "user_role='{4}')").format(func_name, user, password,
                                              http_code, user_role)
     flag = eval(final_func)
     return flag
Example #54
 def test_default_on_non_default_port(self):
     name = "default"
     for serverInfo in self.servers:
         rest = RestConnection(serverInfo)
         proxyPort = rest.get_nodes_self().moxi + 1000
         rest.create_bucket(
             bucket=name, ramQuotaMB=200, proxyPort=proxyPort, authType="sasl", saslPassword="******"
         )
         msg = "create_bucket succeeded but bucket {0} does not exist".format(name)
         self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(name, rest), msg=msg)
Example #55
 def createBucketAudit(self, host, bucketName):
     rest = RestConnection(host)
     #Create an Event for Bucket Creation
     expectedResults = {'name': bucketName, 'ram_quota': 104857600, 'num_replicas': 1,
                        'replica_index': False, 'eviction_policy': 'value_only',
                        'type': 'membase', 'auth_type': 'sasl', 'autocompaction': 'false',
                        'purge_interval': 'undefined', 'flush_enabled': 1, 'num_threads': 3,
                        'ip': self.ipAddress, 'port': 57457, 'sessionid': ''}
     rest.create_bucket(expectedResults['name'], expectedResults['ram_quota'] // 1048576,
                        expectedResults['auth_type'], 'password',
                        expectedResults['num_replicas'], '11211', 'membase', 0,
                        expectedResults['num_threads'], expectedResults['flush_enabled'],
                        'valueOnly')
     return expectedResults
Example #56
 def _create_sasl_buckets(self, server, server_id, bucket_size, password):
     rest = RestConnection(server)
     for i in range(self._sasl_buckets):
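         # SASL buckets all share proxy port 11211; clients select a bucket via SASL auth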
         name = "sasl-" + str(i+1)
         rest.create_bucket(bucket=name,
                            ramQuotaMB=bucket_size,
                            replicaNumber=self._num_replicas,
                            proxyPort=11211,
                            authType="sasl",
                            saslPassword=password)
         self._buckets.append(name)
Example #57
 def _create_standard_buckets(self, server, server_id, bucket_size):
     rest = RestConnection(server)
     for i in range(self._standard_buckets):
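         # no-auth standard buckets cannot share a port, so each gets its own proxy port (11214 + i)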
         name = "standard-" + str(i+1)
         rest.create_bucket(bucket=name,
                            ramQuotaMB=bucket_size,
                            replicaNumber=self._num_replicas,
                            proxyPort=11214+i,
                            authType="none",
                            saslPassword=None)
         self._buckets.append(name)