Example #1
    def test_default_moxi(self):
        name = 'default'
        for serverInfo in self.servers:
            rest = RestConnection(serverInfo)
            replicaNumber = 1
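            # use the node's own moxi proxy port for the new bucket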
            proxyPort = rest.get_nodes_self().moxi
            rest.create_bucket(bucket=name,
                               ramQuotaMB=200,
                               replicaNumber=replicaNumber,
                               proxyPort=proxyPort)
            msg = 'create_bucket succeeded but bucket {0} does not exist'.format(name)
            self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(name, rest), msg=msg)
            ready = BucketOperationHelper.wait_for_memcached(serverInfo, name)
            self.assertTrue(ready, "wait_for_memcached failed")
            inserted_keys = BucketOperationHelper.load_some_data(serverInfo, 1, name)
            self.assertTrue(inserted_keys, 'unable to insert any key to memcached')
            verified = BucketOperationHelper.verify_data(serverInfo, inserted_keys, True, False, self, bucket=name)
            self.assertTrue(verified, msg='failed to verify the stored keys')
            #verify keys
            rest.delete_bucket(name)
            msg = 'bucket "{0}" was not deleted even after waiting for two minutes'.format(name)
            self.assertTrue(BucketOperationHelper.wait_for_bucket_deletion(name, rest, timeout_in_seconds=60), msg=msg)

            #now let's recreate the bucket
            rest.create_bucket(bucket=name,
                               ramQuotaMB=200,
                               replicaNumber=replicaNumber,
                               proxyPort=proxyPort)
            msg = 'create_bucket succeeded but bucket {0} does not exist'.format(name)
            self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(name, rest), msg=msg)
            BucketOperationHelper.wait_for_memcached(serverInfo, name)
            self.log.info('recreated the default bucket...')
            #loop over the keys and make sure they don't exist
            self.assertTrue(BucketOperationHelper.keys_dont_exist(serverInfo, inserted_keys, name),
                            msg='at least one key found in the bucket')
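None of the helper implementations are shown in these examples; every test gates on BucketOperationHelper.wait_for_bucket_creation(name, rest) before touching the bucket. A minimal polling sketch of such a helper, assuming rest.get_buckets() as used in Example #43 (the timeout and poll interval here are illustrative, not the real helper's values):

import time

def wait_for_bucket_creation(bucket_name, rest, timeout_in_seconds=120):
    # poll the REST bucket list until the bucket appears or the timeout expires
    end_time = time.time() + timeout_in_seconds
    while time.time() < end_time:
        if any(b.name == bucket_name for b in rest.get_buckets()):
            return True
        time.sleep(2)
    return False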
Example #2
    def test_default_dedicated(self):
        name = 'recreate-non-default-{0}'.format(uuid.uuid4())
        serverInfo = self.servers[0]
        if serverInfo.ip != "":
            rest = RestConnection(serverInfo)
            replicaNumber = 1
            proxyPort = rest.get_nodes_self().memcached + 2000
            rest.create_bucket(bucket=name,
                               ramQuotaMB=200,
                               replicaNumber=replicaNumber,
                               proxyPort=proxyPort)
            msg = 'create_bucket succeeded but bucket {0} does not exist'.format(
                name)
            self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
                name, rest),
                            msg=msg)
            ready = BucketOperationHelper.wait_for_memcached(serverInfo, name)
            self.assertTrue(ready, "wait_for_memcached failed")
            inserted_keys = BucketOperationHelper.load_some_data(
                serverInfo, 1, name)
            self.assertTrue(inserted_keys,
                            'unable to insert any key to memcached')
            verified = BucketOperationHelper.verify_data(serverInfo,
                                                         inserted_keys,
                                                         True,
                                                         False,
                                                         self,
                                                         bucket=name)
            self.assertTrue(verified, msg='failed to verify the stored keys')
            #verify keys
            rest.delete_bucket(name)
            msg = 'bucket "{0}" was not deleted even after waiting for two minutes'.format(
                name)
            self.assertTrue(BucketOperationHelper.wait_for_bucket_deletion(
                name, rest, timeout_in_seconds=60),
                            msg=msg)

            #now let's recreate the bucket
            rest.create_bucket(bucket=name,
                               ramQuotaMB=200,
                               replicaNumber=replicaNumber,
                               proxyPort=proxyPort)
            msg = 'create_bucket succeeded but bucket {0} does not exist'.format(
                name)
            self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
                name, rest),
                            msg=msg)
            ready = BucketOperationHelper.wait_for_memcached(serverInfo, name)
            self.assertTrue(ready, "wait_for_memcached failed")
            self.log.info('recreated bucket {0}...'.format(name))
            #loop over the keys and make sure they don't exist
            self.assertTrue(BucketOperationHelper.keys_dont_exist(
                serverInfo, inserted_keys, name),
                            msg='at least one key found in the bucket')
Example #3
    def test_default_case_sensitive_dedicated(self):
        name = 'Default'
        for serverInfo in self.servers:
            rest = RestConnection(serverInfo)
            proxyPort = rest.get_nodes_self().moxi
            rest.create_bucket(bucket=name,
                               ramQuotaMB=200,
                               authType='sasl',
                               saslPassword='******',
                               proxyPort=proxyPort)
            msg = 'create_bucket succeeded but bucket {0} does not exist'.format(name)
            self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(name, rest), msg=msg)

            name = 'default'

            try:
                rest.create_bucket(bucket=name,
                                   ramQuotaMB=200,
                                   proxyPort=11221,
                                   authType='sasl',
                                   saslPassword='******')
                msg = "create_bucket created two buckets in different case : {0},{1}".format('default', 'Default')
                self.fail(msg)
            except BucketCreationException as ex:
                #check if 'default' and 'Default' buckets exist
                self.log.info('BucketCreationException was thrown as expected')
                self.log.info(str(ex))
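The except branch only logs; a follow-on check could pin down which bucket actually survived. A small sketch reusing rest.get_buckets() (seen in Example #43), assuming bucket objects expose a name attribute:

bucket_names = [b.name for b in rest.get_buckets()]
# 'Default' (created first) must still exist; the lowercase twin must not
assert 'Default' in bucket_names and 'default' not in bucket_names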
Example #4
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.master = TestInputSingleton.input.servers[0]
        ClusterOperationHelper.cleanup_cluster([self.master])
        BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)

        self._bucket_name = 'default'

        serverInfo = self.master
        rest = RestConnection(serverInfo)
        info = rest.get_nodes_self()
        self._bucket_port = info.moxi
        rest.init_cluster(username=serverInfo.rest_username,
                          password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
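        # leave headroom for the node: the bucket gets two thirds of the memory quota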
        bucket_ram = info.memoryQuota * 2 // 3

        # Add built-in user
        testuser = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'password': '******'}]
        RbacBase().create_user_source(testuser, 'builtin', self.master)
        time.sleep(10)

        # Assign user to role
        role_list = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'roles': 'admin'}]
        RbacBase().add_user_role(role_list, RestConnection(self.master), 'builtin')
        time.sleep(10)

        rest.create_bucket(bucket=self._bucket_name,
                           ramQuotaMB=bucket_ram,
                           proxyPort=info.memcached)
        msg = 'create_bucket succeeded but bucket "default" does not exist'
        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(self._bucket_name, rest), msg=msg)
        ready = BucketOperationHelper.wait_for_memcached(serverInfo, self._bucket_name)
        self.assertTrue(ready, "wait_for_memcached failed")
        self._log_start()
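A setUp like this normally has a mirror-image tearDown; a sketch using the same helper (the _log_finish counterpart to _log_start is an assumption):

    def tearDown(self):
        # drop everything the test created so the next test starts from a clean cluster
        BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)
        self._log_finish()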
Example #5
    def test_non_default_case_sensitive_same_port(self):
        postfix = uuid.uuid4()
        name = 'uppercase_{0}'.format(postfix)
        master = self.servers[0]
        rest = RestConnection(master)
        proxyPort = rest.get_nodes_self().moxi + 100
        shell = RemoteMachineShellConnection(master)
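        # create the bucket through the raw REST endpoint via curl rather than RestConnection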
        url = "http://%s:8091/pools/default/buckets" % master.ip
        params = "name=%s&ramQuotaMB=200&authType=none&replicaNumber=1&proxyPort=%s" \
                                                                   % (name, proxyPort)
        cmd = "curl -X POST -u Administrator:password  -d '%s' %s" % (params, url)
        output, error = shell.execute_command(cmd)
        if output and "error" in output[0]:
            self.fail("Fail to create bucket %s" % name)

        msg = 'create_bucket succeeded but bucket {0} does not exist'.format(name)
        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(name, rest), msg=msg)

        name = 'UPPERCASE{0}'.format(postfix)
        params = "name=%s&ramQuotaMB=200&authType=none&replicaNumber=1&proxyPort=%s" \
                                                                   % (name, proxyPort)
        cmd = "curl -X POST -u Administrator:password  -d '%s' %s" % (params, url)
        output, error = shell.execute_command(cmd)
        if output and 'port is already in use' not in output[0]:
            self.log.error(output)
            self.fail('create-bucket on an already used port did not fail as expected')
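The same POST can be issued without shelling out to curl; a sketch with the requests library (assuming it is available on the test client; ns_server answers 202 Accepted when bucket creation is underway):

import requests

# same endpoint and parameters as the curl command above
resp = requests.post(url,
                     data={'name': name, 'ramQuotaMB': 200, 'authType': 'none',
                           'replicaNumber': 1, 'proxyPort': proxyPort},
                     auth=('Administrator', 'password'))
assert resp.status_code == 202, resp.text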
Example #6
 def test_default_moxi_sasl(self):
     name = 'new-bucket-{0}'.format(uuid.uuid4())
     for serverInfo in self.servers:
         rest = RestConnection(serverInfo)
         replicaNumber = 1
         proxyPort = rest.get_nodes_self().moxi
         rest.create_bucket(bucket=name,
                            ramQuotaMB=200,
                            replicaNumber=replicaNumber,
                            proxyPort=proxyPort,
                            authType="sasl",
                            saslPassword='******')
         msg = 'create_bucket succeeded but bucket {0} does not exist'.format(
             name)
         self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
             name, rest),
                         msg=msg)
         ready = BucketOperationHelper.wait_for_memcached(serverInfo, name)
         self.assertTrue(ready, "wait_for_memcached failed")
         rest.delete_bucket(name)
         msg = 'bucket "{0}" was not deleted even after waiting for two minutes'.format(
             name)
         self.assertTrue(BucketOperationHelper.wait_for_bucket_deletion(
             name, rest, timeout_in_seconds=30),
                         msg=msg)
Example #7
 def test_valid_bucket_name(self, password='******'):
         tasks = []
         if self.bucket_type == 'sasl':
             self.cluster.create_sasl_bucket(self.server, self.bucket_name, password, self.num_replicas, self.bucket_size)
             self.buckets.append(Bucket(name=self.bucket_name, authType="sasl", saslPassword=password, num_replicas=self.num_replicas,
                                        bucket_size=self.bucket_size, master_id=self.server))
         elif self.bucket_type == 'standard':
             self.cluster.create_standard_bucket(self.server, self.bucket_name, STANDARD_BUCKET_PORT + 1, self.bucket_size, self.num_replicas)
             self.buckets.append(Bucket(name=self.bucket_name, authType=None, saslPassword=None, num_replicas=self.num_replicas,
                                        bucket_size=self.bucket_size, port=STANDARD_BUCKET_PORT + 1, master_id=self.server))
         elif self.bucket_type == "memcached":
             tasks.append(self.cluster.async_create_memcached_bucket(self.server, self.bucket_name, STANDARD_BUCKET_PORT + 1,
                                                                     self.bucket_size, self.num_replicas))
             self.buckets.append(Bucket(name=self.bucket_name, authType=None, saslPassword=None, num_replicas=self.num_replicas,
                                        bucket_size=self.bucket_size, port=STANDARD_BUCKET_PORT + 1 , master_id=self.server, type='memcached'))
             for task in tasks:
                 task.result()
         else:
             self.log.error('Bucket type not specified')
             return
         self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(self.bucket_name, self.rest),
                         msg='failed to start up bucket with name "{0}"'.format(self.bucket_name))
         gen_load = BlobGenerator('buckettest', 'buckettest-', self.value_size, start=0, end=self.num_items)
         self._load_all_buckets(self.server, gen_load, "create", 0)
         self.cluster.bucket_delete(self.server, self.bucket_name)
         self.assertTrue(BucketOperationHelper.wait_for_bucket_deletion(self.bucket_name, self.rest, timeout_in_seconds=60),
                         msg='bucket "{0}" was not deleted even after waiting for 30 seconds'.format(self.bucket_name))
Example #8
    def test_valid_bucket_name(self, password='******'):
            tasks = []
            shared_params = self._create_bucket_params(server=self.server, size=self.bucket_size,
                                                              replicas=self.num_replicas)
            if self.bucket_type == 'sasl':
                self.cluster.create_sasl_bucket(name=self.bucket_name, password=password,bucket_params=shared_params)
                self.buckets.append(Bucket(name=self.bucket_name, authType="sasl", saslPassword=password, num_replicas=self.num_replicas,
                                           bucket_size=self.bucket_size, master_id=self.server))
            elif self.bucket_type == 'standard':
                self.cluster.create_standard_bucket(name=self.bucket_name, port=STANDARD_BUCKET_PORT+1,
                                                    bucket_params=shared_params)
                self.buckets.append(Bucket(name=self.bucket_name, authType=None, saslPassword=None, num_replicas=self.num_replicas,
                                           bucket_size=self.bucket_size, port=STANDARD_BUCKET_PORT + 1, master_id=self.server))
            elif self.bucket_type == "memcached":
                tasks.append(self.cluster.async_create_memcached_bucket(name=self.bucket_name,
                                                                        port=STANDARD_BUCKET_PORT+1,
                                                                        bucket_params=shared_params))

                self.buckets.append(Bucket(name=self.bucket_name, authType=None, saslPassword=None,
                                           num_replicas=self.num_replicas, bucket_size=self.bucket_size,
                                           port=STANDARD_BUCKET_PORT + 1 , master_id=self.server, type='memcached'))
                for task in tasks:
                    task.result()
            else:
                self.log.error('Bucket type not specified')
                return
            self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(self.bucket_name, self.rest),
                            msg='failed to start up bucket with name "{0}"'.format(self.bucket_name))
            gen_load = BlobGenerator('buckettest', 'buckettest-', self.value_size, start=0, end=self.num_items)
            self._load_all_buckets(self.server, gen_load, "create", 0)
            self.cluster.bucket_delete(self.server, self.bucket_name)
            self.assertTrue(BucketOperationHelper.wait_for_bucket_deletion(self.bucket_name, self.rest, timeout_in_seconds=60),
                            msg='bucket "{0}" was not deleted even after waiting for 30 seconds'.format(self.bucket_name))
Example #9
    def test_non_default_case_sensitive_different_port(self):
        postfix = uuid.uuid4()
        lowercase_name = 'uppercase_{0}'.format(postfix)
        for serverInfo in self.servers:
            rest = RestConnection(serverInfo)
            proxyPort = rest.get_nodes_self().moxi + 500
            rest.create_bucket(bucket=lowercase_name,
                               ramQuotaMB=200,
                               proxyPort=proxyPort)
            msg = 'create_bucket succeeded but bucket {0} does not exist'.format(
                lowercase_name)
            self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
                lowercase_name, rest),
                            msg=msg)

            uppercase_name = 'UPPERCASE_{0}'.format(postfix)
            try:
                rest.create_bucket(bucket=uppercase_name,
                                   ramQuotaMB=200,
                                   proxyPort=proxyPort + 1000)
                msg = "create_bucket created two buckets in different case : {0},{1}".format(
                    lowercase_name, uppercase_name)
                self.fail(msg)
            except BucketCreationException as ex:
                #creating the same name in a different case must fail
                self.log.info('BucketCreationException was thrown as expected')
                self.log.info(str(ex))
Example #10
    def test_non_default_case_sensitive_same_port(self):
        postfix = uuid.uuid4()
        name = 'uppercase_{0}'.format(postfix)
        for serverInfo in self.servers:
            rest = RestConnection(serverInfo)
            proxyPort = rest.get_nodes_self().moxi + 100
            rest.create_bucket(bucket=name,
                               ramQuotaMB=200,
                               proxyPort=proxyPort)
            msg = 'create_bucket succeeded but bucket {0} does not exist'.format(
                name)
            self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
                name, rest),
                            msg=msg)

            self.log.info(
                "user should not be able to create a new bucket on an already used port"
            )
            name = 'UPPERCASE{0}'.format(postfix)
            try:
                rest.create_bucket(bucket=name,
                                   ramQuotaMB=200,
                                   proxyPort=proxyPort)
                self.fail(
                    'create-bucket did not throw exception while creating a new bucket on an already used port'
                )
            #make sure it raises BucketCreationException
            except BucketCreationException as ex:
                self.log.error(ex)
Example #11
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.master = TestInputSingleton.input.servers[0]
        ClusterOperationHelper.cleanup_cluster([self.master])
        BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)

        self._bucket_name = 'default'

        serverInfo = self.master
        rest = RestConnection(serverInfo)
        info = rest.get_nodes_self()
        self._bucket_port = info.moxi
        rest.init_cluster(username=serverInfo.rest_username,
                          password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        bucket_ram = info.memoryQuota * 2 // 3
        rest.create_bucket(bucket=self._bucket_name,
                           ramQuotaMB=bucket_ram,
                           proxyPort=info.memcached)
        msg = 'create_bucket succeeded but bucket "default" does not exist'
        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
            self._bucket_name, rest),
                        msg=msg)
        ready = BucketOperationHelper.wait_for_memcached(
            serverInfo, self._bucket_name)
        self.assertTrue(ready, "wait_for_memcached failed")
        self._log_start()
Example #12
 def test_valid_length(self):
     max_len = 100
     name_len = self.input.param('name_length', 100)
     name = 'a' * name_len
     master = self.servers[0]
     rest = RestConnection(master)
     proxyPort = rest.get_nodes_self().moxi
     try:
         rest.create_bucket(bucket=name,
                            ramQuotaMB=200,
                            authType='sasl',
                            proxyPort=proxyPort)
         if name_len <= max_len:
             msg = 'failed to start up bucket with valid length'
             self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
                 name, rest),
                             msg=msg)
         else:
             self.fail('Bucket with invalid length created')
     except BucketCreationException as ex:
         self.log.error(ex)
         if name_len <= max_len:
             self.fail('could not create bucket with valid length')
         else:
             self.log.info(
                 'bucket with invalid length not created as expected')
Example #13
 def test_non_default_moxi(self):
     name = 'new-bucket-{0}'.format(uuid.uuid4())
     for serverInfo in self.servers:
         replicas = [0, 1, 2, 3]
         for replicaNumber in replicas:
             rest = RestConnection(serverInfo)
             proxyPort = rest.get_nodes_self().moxi + 2000
             rest.create_bucket(bucket=name,
                                ramQuotaMB=200,
                                replicaNumber=replicaNumber,
                                proxyPort=proxyPort)
             remote = RemoteMachineShellConnection(serverInfo)
             msg = 'create_bucket succeeded but bucket {0} does not exist'.format(
                 name)
             self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
                 name, rest),
                             msg=msg)
             rest.delete_bucket(name)
             msg = 'bucket "{0}" was not deleted even after waiting for 30 seconds'.format(
                 name)
             self.assertTrue(BucketOperationHelper.wait_for_bucket_deletion(
                 name, rest, timeout_in_seconds=30),
                             msg=msg)
             msg = 'bucket {0} data files are not deleted after bucket deleted from membase'.format(
                 name)
             self.assertTrue(self.wait_for_data_files_deletion(
                 name,
                 remote_connection=remote,
                 rest=rest,
                 timeout_in_seconds=20),
                             msg=msg)
             BucketOperationHelper.delete_bucket_or_assert(
                 serverInfo, name, self)
Example #14
 def test_bucket_create_password(self, bucket_name='secretsbucket', num_replicas=1, bucket_size=100):
     for servers in self.servers:
         self.secretmgmt_base_obj.setup_pass_node(servers, self.password)
     bucket_type = self.input.param("bucket_type", 'couchbase')
     tasks = []
     if bucket_type == 'couchbase':
         # self.cluster.create_sasl_bucket(self.master, bucket_name, self.password, num_replicas)
         rest = RestConnection(self.master)
         rest.create_bucket(bucket_name, ramQuotaMB=100)
     elif bucket_type == 'standard':
         self.cluster.create_standard_bucket(self.master, bucket_name, STANDARD_BUCKET_PORT + 1,
                                             bucket_size)
     elif bucket_type == "memcached":
         tasks.append(
             self.cluster.async_create_memcached_bucket(self.master, bucket_name, STANDARD_BUCKET_PORT + 1,
                                                        bucket_size))
         for task in tasks:
             self.assertTrue(task.result(), "Issue with bucket creation")
     else:
         self.log.error('Bucket type not specified')
         return
     self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(bucket_name, RestConnection(self.master)),
                     msg='failed to start up bucket with name "{0}'.format(bucket_name))
     gen_load = BlobGenerator('buckettest', 'buckettest-', self.value_size, start=0, end=self.num_items)
     self._load_all_buckets(self.master, gen_load, "create", 0)
     install_path = self.secretmgmt_base_obj._get_install_path(self.master)
     temp_result = self.secretmgmt_base_obj.check_config_files(self.master, install_path, '/config/config.dat',
                                                               self.password)
     self.assertTrue(temp_result, "Password found in config.dat")
     temp_result = self.secretmgmt_base_obj.check_config_files(self.master, install_path, 'isasl.pw', self.password)
     self.assertTrue(temp_result, "Password found in isasl.pw")
Example #15
    def test_default_case_sensitive_dedicated(self):
        name = 'Default'
        for serverInfo in self.servers:
            rest = RestConnection(serverInfo)
            proxyPort = rest.get_nodes_self().moxi
            rest.create_bucket(bucket=name,
                               ramQuotaMB=200,
                               authType='sasl',
                               saslPassword='******',
                               proxyPort=proxyPort)
            msg = 'create_bucket succeeded but bucket {0} does not exist'.format(
                name)
            self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
                name, rest),
                            msg=msg)

            name = 'default'

            try:
                rest.create_bucket(bucket=name,
                                   ramQuotaMB=200,
                                   proxyPort=11221,
                                   authType='sasl',
                                   saslPassword='******')
                msg = "create_bucket created two buckets in different case : {0},{1}".format(
                    'default', 'Default')
                self.fail(msg)
            except BucketCreationException as ex:
                #check if 'default' and 'Default' buckets exist
                self.log.info('BucketCreationException was thrown as expected')
                self.log.info(str(ex))
Example #16
    def test_ephemeral_buckets(self):
        shared_params = self._create_bucket_params(server=self.server,
                                                   size=100,
                                                   replicas=self.num_replicas,
                                                   bucket_type='ephemeral')
        # just do sasl for now, pending decision on support of non-sasl buckets in 5.0
        self.cluster.create_sasl_bucket(name=self.bucket_name,
                                        password=self.sasl_password,
                                        bucket_params=shared_params)
        self.buckets.append(
            Bucket(name=self.bucket_name,
                   authType="sasl",
                   saslPassword=self.sasl_password,
                   num_replicas=self.num_replicas,
                   bucket_size=self.bucket_size,
                   master_id=self.server))

        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
            self.bucket_name, self.rest),
                        msg='failed to start up bucket with name "{0}"'.format(
                            self.bucket_name))
        gen_load = BlobGenerator('buckettest',
                                 'buckettest-',
                                 self.value_size,
                                 start=0,
                                 end=self.num_items)
        self._load_all_buckets(self.server, gen_load, "create", 0)
        self.cluster.bucket_delete(self.server, self.bucket_name)
        self.assertTrue(
            BucketOperationHelper.wait_for_bucket_deletion(
                self.bucket_name, self.rest, timeout_in_seconds=60),
            msg='bucket "{0}" was not deleted even after waiting for 30 seconds'
            .format(self.bucket_name))
Example #18
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.master = TestInputSingleton.input.servers[0]
        ClusterOperationHelper.cleanup_cluster([self.master])
        BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)

        self._bucket_name = 'default'

        serverInfo = self.master

        rest = RestConnection(serverInfo)
        info = rest.get_nodes_self()
        self._bucket_port = info.moxi
        rest.init_cluster(username=serverInfo.rest_username,
                          password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        bucket_ram = info.memoryQuota * 2 // 3

        # Add built-in user
        testuser = [{
            'id': 'cbadminbucket',
            'name': 'cbadminbucket',
            'password': '******'
        }]
        RbacBase().create_user_source(testuser, 'builtin', self.master)

        # Assign user to role
        role_list = [{
            'id': 'cbadminbucket',
            'name': 'cbadminbucket',
            'roles': 'admin'
        }]
        RbacBase().add_user_role(role_list, RestConnection(self.master),
                                 'builtin')

        rest.create_bucket(bucket=self._bucket_name,
                           ramQuotaMB=bucket_ram,
                           proxyPort=info.memcached)

        msg = 'create_bucket succeeded but bucket "default" does not exist'

        if os.environ.get(testconstants.TESTRUNNER_CLIENT) == testconstants.PYTHON_SDK:
            self.client = SDKSmartClient(
                serverInfo,
                self._bucket_name,
                compression=TestInputSingleton.input.param(
                    "sdk_compression", True))
        else:
            self.client = MemcachedClientHelper.direct_client(
                serverInfo, self._bucket_name)

        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
            self._bucket_name, rest),
                        msg=msg)
        ready = BucketOperationHelper.wait_for_memcached(
            serverInfo, self._bucket_name)
        self.assertTrue(ready, "wait_for_memcached failed")
        self._log_start()
Example #19
 def test_default_moxi(self):
     name = 'default'
     for serverInfo in self.servers:
         rest = RestConnection(serverInfo)
         proxyPort = rest.get_nodes_self().moxi + 1000
         rest.create_bucket(bucket=name, ramQuotaMB=200, proxyPort=proxyPort)
         msg = 'create_bucket succeeded but bucket {0} does not exist'.format(name)
         self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(name, rest), msg=msg)
Example #21
 def test_default_case_sensitive_dedicated(self):
     name = 'Default'
     for serverInfo in self.servers:
         rest = RestConnection(serverInfo)
         rest.create_bucket(bucket=name, ramQuotaMB=256)
         msg = 'create_bucket succeeded but bucket {0} does not exist'.format(
             name)
         self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
             name, rest),
                         msg=msg)
Example #22
 def test_default_on_non_default_port(self):
     name = "default"
     for serverInfo in self.servers:
         rest = RestConnection(serverInfo)
         proxyPort = rest.get_nodes_self().moxi + 1000
         rest.create_bucket(
             bucket=name, ramQuotaMB=200, proxyPort=proxyPort, authType="sasl", saslPassword="******"
         )
         msg = "create_bucket succeeded but bucket {0} does not exist".format(name)
         self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(name, rest), msg=msg)
Example #23
    def test_valid_chars(self):
        name = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-.%"
        for serverInfo in self.servers:
            rest = RestConnection(serverInfo)
            proxyPort = rest.get_nodes_self().moxi
            try:
                rest.create_bucket(bucket=name, ramQuotaMB=200, authType="sasl", proxyPort=proxyPort)
            except BucketCreationException as ex:
                self.log.error(ex)
                self.fail("could not create bucket with all valid characters")

            msg = "failed to start up bucket with all valid characters"
            self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(name, rest), msg=msg)
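A complementary negative test is easy to derive from this pattern; a hypothetical case (not part of the original suite) expecting rejection of a name outside the valid set:

    def test_invalid_chars(self):
        name = 'bucket name with spaces'  # spaces are not in the valid set above
        rest = RestConnection(self.servers[0])
        try:
            rest.create_bucket(bucket=name, ramQuotaMB=200)
            self.fail('bucket with invalid characters was created')
        except BucketCreationException as ex:
            self.log.info('rejected as expected: {0}'.format(ex))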
Example #24
 def _create_bucket(self, number_of_replicas=1, bucket_name='default'):
     self.bucket_name = bucket_name
     ip_rest = RestConnection(self.servers[0])
     info = ip_rest.get_nodes_self()
     bucket_ram = info.memoryQuota * 2 // 3
     self.log.info('creating bucket : {0}'.format(self.bucket_name))
     ip_rest.create_bucket(bucket=self.bucket_name,
                           ramQuotaMB=bucket_ram,
                           replicaNumber=number_of_replicas,
                           proxyPort=11220)
     msg = 'create_bucket succeeded but bucket {0} does not exist'.format(self.bucket_name)
     self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(self.bucket_name,
                                                                    ip_rest), msg=msg)
     BucketOperationHelper.wait_for_memcached(self.servers[0], self.bucket_name)
Example #25
    def test_one_replica(self):
        postfix = uuid.uuid4()
        name = 'replica_{0}'.format(postfix)
        for serverInfo in self.servers:
            rest = RestConnection(serverInfo)
            proxyPort = rest.get_nodes_self().moxi
            try:
                rest.create_bucket(bucket=name, ramQuotaMB=200, authType='sasl', proxyPort=proxyPort)
            except BucketCreationException as ex:
                self.log.error(ex)
                self.fail('failed to create bucket with 1 replica')

            msg = 'failed to start up bucket with 1 replica'
            self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(name, rest), msg=msg)
Example #27
    def test_valid_chars(self):
        name = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-.%'
        for serverInfo in self.servers:
            rest = RestConnection(serverInfo)
            proxyPort = rest.get_nodes_self().moxi
            try:
                rest.create_bucket(bucket=name, ramQuotaMB=200,
                                   authType='sasl', proxyPort=proxyPort)
            except BucketCreationException as ex:
                self.log.error(ex)
                self.fail('could not create bucket with all valid characters')

            msg = 'failed to start up bucket with all valid characters'
            self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(name, rest), msg=msg)
Example #29
    def test_max_memory_quota(self):
        postfix = uuid.uuid4()
        name = "maxquota_{0}".format(postfix)
        for serverInfo in self.servers:
            rest = RestConnection(serverInfo)
            info = rest.get_nodes_self()
            proxyPort = info.moxi
            bucket_ram = info.memoryQuota
            try:
                rest.create_bucket(bucket=name, ramQuotaMB=bucket_ram, authType="sasl", proxyPort=proxyPort)
            except BucketCreationException as ex:
                self.log.error(ex)
                self.fail("failed to create bucket with max ram per node")

            msg = "failed to start up bucket with max ram per node"
            self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(name, rest), msg=msg)
Example #30
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.master = TestInputSingleton.input.servers[0]
        ClusterOperationHelper.cleanup_cluster([self.master])
        BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)

        self._bucket_name = 'default'

        serverInfo = self.master
        rest = RestConnection(serverInfo)
        info = rest.get_nodes_self()
        self._bucket_port = info.moxi
        rest.init_cluster(username=serverInfo.rest_username,
                          password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        bucket_ram = info.memoryQuota * 2 // 3

        # Add built-in user
        testuser = [{
            'id': 'cbadminbucket',
            'name': 'cbadminbucket',
            'password': '******'
        }]
        RbacBase().create_user_source(testuser, 'builtin', self.master)
        time.sleep(10)

        # Assign user to role
        role_list = [{
            'id': 'cbadminbucket',
            'name': 'cbadminbucket',
            'roles': 'admin'
        }]
        RbacBase().add_user_role(role_list, RestConnection(self.master),
                                 'builtin')
        time.sleep(10)

        rest.create_bucket(bucket=self._bucket_name,
                           ramQuotaMB=bucket_ram,
                           proxyPort=info.memcached)
        msg = 'create_bucket succeeded but bucket "default" does not exist'
        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
            self._bucket_name, rest),
                        msg=msg)
        ready = BucketOperationHelper.wait_for_memcached(
            serverInfo, self._bucket_name)
        self.assertTrue(ready, "wait_for_memcached failed")
        self._log_start()
Example #31
    def test_max_memory_quota(self):
        postfix = uuid.uuid4()
        name = 'maxquota_{0}'.format(postfix)
        for serverInfo in self.servers:
            rest = RestConnection(serverInfo)
            info = rest.get_nodes_self()
            bucket_ram = info.memoryQuota
            try:
                rest.create_bucket(bucket=name, ramQuotaMB=bucket_ram)
            except BucketCreationException as ex:
                self.log.error(ex)
                self.fail('failed to create bucket with max ram per node')

            msg = 'failed to start up bucket with max ram per node'
            self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
                name, rest),
                            msg=msg)
Example #32
    def test_zero_replica(self):
        postfix = uuid.uuid4()
        name = 'replica_{0}'.format(postfix)
        for serverInfo in self.servers:
            rest = RestConnection(serverInfo)
            try:
                rest.create_bucket(bucket=name,
                                   ramQuotaMB=256,
                                   replicaNumber=0)
            except BucketCreationException as ex:
                self.log.error(ex)
                self.fail('failed to create bucket with 0 replicas')

            msg = 'failed to start up bucket with 0 replicas'
            self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
                name, rest),
                            msg=msg)
Example #33
    def test_ephemeral_buckets(self):
        eviction_policy = self.input.param("eviction_policy", 'noEviction')
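        # ephemeral buckets accept 'noEviction' (the default) or 'nruEviction'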
        shared_params = self._create_bucket_params(server=self.server, size=100,
                                                   replicas=self.num_replicas, bucket_type='ephemeral',
                                                   eviction_policy=eviction_policy)
        # just do sasl for now, pending decision on support of non-sasl buckets in 5.0
        self.cluster.create_sasl_bucket(name=self.bucket_name, password=self.sasl_password, bucket_params=shared_params)
        self.buckets.append(Bucket(name=self.bucket_name, authType="sasl", saslPassword=self.sasl_password,
                                           num_replicas=self.num_replicas,
                                           bucket_size=self.bucket_size, master_id=self.server))

        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(self.bucket_name, self.rest),
                            msg='failed to start up bucket with name "{0}"'.format(self.bucket_name))
        gen_load = BlobGenerator('buckettest', 'buckettest-', self.value_size, start=0, end=self.num_items)
        self._load_all_buckets(self.server, gen_load, "create", 0)
        self.cluster.bucket_delete(self.server, self.bucket_name)
        self.assertTrue(BucketOperationHelper.wait_for_bucket_deletion(self.bucket_name, self.rest, timeout_in_seconds=60),
                            msg='bucket "{0}" was not deleted even after waiting for 30 seconds'.format(self.bucket_name))
Example #34
    def test_default_case_sensitive_different_ports(self):
        name = "default"
        for serverInfo in self.servers:
            rest = RestConnection(serverInfo)
            proxyPort = rest.get_nodes_self().moxi + 500
            rest.create_bucket(bucket=name, ramQuotaMB=200, proxyPort=proxyPort)
            msg = "create_bucket succeeded but bucket {0} does not exist".format(name)
            self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(name, rest), msg=msg)

            try:
                name = "DEFAULT"
                rest.create_bucket(bucket=name, ramQuotaMB=200, proxyPort=proxyPort + 1000)
                msg = "create_bucket created two buckets in different case : {0},{1}".format("default", "DEFAULT")
                self.fail(msg)
            except BucketCreationException as ex:
                # check if 'default' and 'DEFAULT' buckets exist
                self.log.info("BucketCreationException was thrown as expected")
                self.log.info(str(ex))
Example #35
    def test_non_default_case_sensitive_same_port(self):
        postfix = uuid.uuid4()
        name = "uppercase_{0}".format(postfix)
        for serverInfo in self.servers:
            rest = RestConnection(serverInfo)
            proxyPort = rest.get_nodes_self().moxi + 100
            rest.create_bucket(bucket=name, ramQuotaMB=200, proxyPort=proxyPort)
            msg = "create_bucket succeeded but bucket {0} does not exist".format(name)
            self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(name, rest), msg=msg)

            self.log.info("user should not be able to create a new bucket on a an already used port")
            name = "UPPERCASE{0}".format(postfix)
            try:
                rest.create_bucket(bucket=name, ramQuotaMB=200, proxyPort=proxyPort)
                self.fail("create-bucket did not throw exception while creating a new bucket on an already used port")
            # make sure it raises BucketCreationException
            except BucketCreationException as ex:
                self.log.error(ex)
Example #36
 def test_default_moxi_sasl(self):
     name = 'new-bucket-{0}'.format(uuid.uuid4())
     for serverInfo in self.servers:
         rest = RestConnection(serverInfo)
         replicaNumber = 1
         proxyPort = rest.get_nodes_self().moxi
         rest.create_bucket(bucket=name,
                            ramQuotaMB=200,
                            replicaNumber=replicaNumber,
                            proxyPort=proxyPort,
                            authType="sasl",
                            saslPassword='******')
         msg = 'create_bucket succeeded but bucket {0} does not exist'.format(name)
         self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(name, rest), msg=msg)
         ready = BucketOperationHelper.wait_for_memcached(serverInfo, name)
         self.assertTrue(ready, "wait_for_memcached failed")
         rest.delete_bucket(name)
         msg = 'bucket "{0}" was not deleted even after waiting for two minutes'.format(name)
         self.assertTrue(BucketOperationHelper.wait_for_bucket_deletion(name, rest, timeout_in_seconds=30), msg=msg)
Example #37
 def test_valid_length(self):
     max_len = 100
     name_len = self.input.param("name_length", 100)
     name = "a" * name_len
     master = self.servers[0]
     rest = RestConnection(master)
     proxyPort = rest.get_nodes_self().moxi
     try:
         rest.create_bucket(bucket=name, ramQuotaMB=200, authType="sasl", proxyPort=proxyPort)
         if name_len <= max_len:
             msg = "failed to start up bucket with valid length"
             self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(name, rest), msg=msg)
         else:
             self.fail("Bucket with invalid length created")
     except BucketCreationException as ex:
         self.log.error(ex)
         if name_len <= max_len:
             self.fail("could not create bucket with valid length")
         else:
             self.log.info("bucket with invalid length not created as expected")
Example #38
    def test_non_default_case_sensitive_different_port(self):
        postfix = uuid.uuid4()
        lowercase_name = "uppercase_{0}".format(postfix)
        for serverInfo in self.servers:
            rest = RestConnection(serverInfo)
            proxyPort = rest.get_nodes_self().moxi + 500
            rest.create_bucket(bucket=lowercase_name, ramQuotaMB=200, proxyPort=proxyPort)
            msg = "create_bucket succeeded but bucket {0} does not exist".format(lowercase_name)
            self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(lowercase_name, rest), msg=msg)

            uppercase_name = "UPPERCASE_{0}".format(postfix)
            try:
                rest.create_bucket(bucket=uppercase_name, ramQuotaMB=200, proxyPort=proxyPort + 1000)
                msg = "create_bucket created two buckets in different case : {0},{1}".format(
                    lowercase_name, uppercase_name
                )
                self.fail(msg)
            except BucketCreationException as ex:
                # creating the same name in a different case must fail
                self.log.info("BucketCreationException was thrown as expected")
                self.log.info(str(ex))
Example #39
 def test_non_default_moxi(self):
     name = "new-bucket-{0}".format(uuid.uuid4())
     for serverInfo in self.servers:
         replicas = [0, 1, 2, 3]
         for replicaNumber in replicas:
             rest = RestConnection(serverInfo)
             proxyPort = rest.get_nodes_self().moxi + 2000
             rest.create_bucket(bucket=name, ramQuotaMB=200, replicaNumber=replicaNumber, proxyPort=proxyPort)
             remote = RemoteMachineShellConnection(serverInfo)
             msg = "create_bucket succeeded but bucket {0} does not exist".format(name)
             self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(name, rest), msg=msg)
             rest.delete_bucket(name)
             msg = 'bucket "{0}" was not deleted even after waiting for 30 seconds'.format(name)
             self.assertTrue(
                 BucketOperationHelper.wait_for_bucket_deletion(name, rest, timeout_in_seconds=30), msg=msg
             )
             msg = "bucket {0} data files are not deleted after bucket deleted from membase".format(name)
             self.assertTrue(
                 self.wait_for_data_files_deletion(name, remote_connection=remote, rest=rest, timeout_in_seconds=20),
                 msg=msg,
             )
             BucketOperationHelper.delete_bucket_or_assert(serverInfo, name, self)
Example #40
 def test_valid_length(self):
     max_len = 100
     name_len = self.input.param('name_length', 100)
     name = 'a' * name_len
     master = self.servers[0]
     rest = RestConnection(master)
     try:
         rest.create_bucket(bucket=name,
                            ramQuotaMB=256,
                            storageBackend=self.bucket_storage)
         if name_len <= max_len:
             msg = 'failed to start up bucket with valid length'
             self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
                 name, rest),
                             msg=msg)
         else:
             self.fail('Bucket with invalid length created')
     except BucketCreationException as ex:
         self.log.error(ex)
         if name_len <= max_len:
             self.fail('could not create bucket with valid length')
         else:
             self.log.info(
                 'bucket with invalid length not created as expected')
Example #41
 def test_bucket_edit_password(self,
                               bucket_name='secretsbucket',
                               num_replicas=1,
                               bucket_size=100):
     updated_pass = "******"
     rest = RestConnection(self.master)
     for servers in self.servers:
         self.secretmgmt_base_obj.setup_pass_node(servers, self.password)
     bucket_type = self.input.param("bucket_type", 'standard')
     tasks = []
     if bucket_type == 'sasl':
         self.cluster.create_sasl_bucket(self.master, bucket_name,
                                         self.password, num_replicas,
                                         bucket_size)
         self.sleep(10)
         rest.change_bucket_props(bucket_name, saslPassword=updated_pass)
     else:
         self.log.error('Bucket type not specified')
         return
     self.assertTrue(
         BucketOperationHelper.wait_for_bucket_creation(
             bucket_name, RestConnection(self.master)),
         msg='failed to start up bucket with name "{0}'.format(bucket_name))
     gen_load = BlobGenerator('buckettest',
                              'buckettest-',
                              self.value_size,
                              start=0,
                              end=self.num_items)
     self._load_all_buckets(self.master, gen_load, "create", 0)
     install_path = self.secretmgmt_base_obj._get_install_path(self.master)
     temp_result = self.secretmgmt_base_obj.check_config_files(
         self.master, install_path, '/config/config.dat', updated_pass)
     self.assertTrue(temp_result, "Password found in config.dat")
     temp_result = self.secretmgmt_base_obj.check_config_files(
         self.master, install_path, 'isasl.pw', updated_pass)
     self.assertTrue(temp_result, "Password found in isasl.pw")
Example #42
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.master = TestInputSingleton.input.servers[0]
        ClusterOperationHelper.cleanup_cluster([self.master])
        BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)

        self._bucket_name = 'default'

        serverInfo = self.master
        rest = RestConnection(serverInfo)
        info = rest.get_nodes_self()
        self._bucket_port = info.moxi
        rest.init_cluster(username=serverInfo.rest_username,
                          password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        bucket_ram = info.memoryQuota * 2 // 3
        rest.create_bucket(bucket=self._bucket_name,
                           ramQuotaMB=bucket_ram,
                           proxyPort=info.memcached)
        msg = 'create_bucket succeeded but bucket "default" does not exist'
        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(self._bucket_name, rest), msg=msg)
        ready = BucketOperationHelper.wait_for_memcached(serverInfo, self._bucket_name)
        self.assertTrue(ready, "wait_for_memcached failed")
        self._log_start()
Example #43
    def _install_and_upgrade(self,
                             initial_version='1.6.5.3',
                             initialize_cluster=False,
                             create_buckets=False,
                             insert_data=False):
        log = logger.Logger.get_logger()
        input = TestInputSingleton.input
        version = input.test_params['version']
        rest_settings = input.membase_settings
        servers = input.servers
        server = servers[0]
        is_amazon = False
        if input.test_params.get('amazon', False):
            is_amazon = True
        remote = RemoteMachineShellConnection(server)
        rest = RestConnection(server)
        info = remote.extract_remote_info()
        remote.membase_uninstall()
        remote.couchbase_uninstall()
        builds, changes = BuildQuery().get_all_builds()
        #release_builds = BuildQuery().get_all_release_builds(initial_version)

        #if initial_version == "1.7.2":
        #   initial_version = "1.7.2r-20"
        older_build = BuildQuery().find_membase_release_build(
            deliverable_type=info.deliverable_type,
            os_architecture=info.architecture_type,
            build_version=initial_version,
            product='membase-server-enterprise',
            is_amazon=is_amazon)
        if info.type.lower() == 'windows':
            if older_build.product_version.startswith("1.8"):
                abbr_product = "cb"
            else:
                abbr_product = "mb"
            remote.download_binary_in_win(older_build.url, abbr_product,
                                          initial_version)
            remote.install_server_win(older_build, initial_version)
            RestHelper(rest).is_ns_server_running(
                testconstants.NS_SERVER_TIMEOUT)
            rest.init_cluster(rest_settings.rest_username,
                              rest_settings.rest_password)
            bucket_data = {}
            if initialize_cluster:
                rest.init_cluster_memoryQuota(
                    memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
                if create_buckets:
                    _create_load_multiple_bucket(self,
                                                 server,
                                                 bucket_data,
                                                 howmany=2)
            if version.startswith("1.8"):
                abbr_product = "cb"
            appropriate_build = _get_build(servers[0],
                                           version,
                                           is_amazon=is_amazon)
            self.assertTrue(appropriate_build.url,
                            msg="unable to find build {0}".format(version))
            remote.download_binary_in_win(appropriate_build.url, abbr_product,
                                          version)
            remote.stop_membase()
            log.info("###### START UPGRADE. #########")
            remote.membase_upgrade_win(info.architecture_type,
                                       info.windows_name, version,
                                       initial_version)
            remote.disconnect()
            RestHelper(rest).is_ns_server_running(
                testconstants.NS_SERVER_TIMEOUT)

            pools_info = rest.get_pools_info()

            rest.init_cluster(rest_settings.rest_username,
                              rest_settings.rest_password)
            time.sleep(TIMEOUT_SECS)
            # verify admin_creds still set

            # assertTrue(a, b) treats b as the failure message; assertEqual
            # compares the versions as (most likely) intended
            self.assertEqual(pools_info['implementationVersion'],
                             appropriate_build.product_version)
            if initialize_cluster:
                # TODO: how can I verify that the cluster init config is preserved
                if create_buckets:
                    self.assertTrue(
                        BucketOperationHelper.wait_for_bucket_creation(
                            'bucket-0', rest),
                        msg="bucket 'default' does not exist..")
                if insert_data:
                    buckets = rest.get_buckets()
                    for bucket in buckets:
                        BucketOperationHelper.keys_exist_or_assert(
                            bucket_data[bucket.name]["inserted_keys"], server,
                            bucket.name, self)
        else:
            log.error("This is not windows server!")
Example #44
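    # Creates a sasl, standard, or memcached bucket depending on
    # self.bucket_type, loads a handful of keys, then deletes the bucket and
    # waits for the deletion to show up through the REST API.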
    def test_valid_bucket_name(self, password='******'):
        tasks = []
        shared_params = self._create_bucket_params(server=self.server,
                                                   size=self.bucket_size,
                                                   replicas=self.num_replicas)
        if self.bucket_type == 'sasl':
            self.cluster.create_sasl_bucket(name=self.bucket_name,
                                            password=password,
                                            bucket_params=shared_params)
            self.buckets.append(
                Bucket(name=self.bucket_name,
                       num_replicas=self.num_replicas,
                       bucket_size=self.bucket_size,
                       master_id=self.server))
        elif self.bucket_type == 'standard':
            self.cluster.create_standard_bucket(name=self.bucket_name,
                                                port=STANDARD_BUCKET_PORT + 1,
                                                bucket_params=shared_params)
            self.buckets.append(
                Bucket(name=self.bucket_name,
                       num_replicas=self.num_replicas,
                       bucket_size=self.bucket_size,
                       port=STANDARD_BUCKET_PORT + 1,
                       master_id=self.server))
        elif self.bucket_type == "memcached":
            tasks.append(
                self.cluster.async_create_memcached_bucket(
                    name=self.bucket_name,
                    port=STANDARD_BUCKET_PORT + 1,
                    bucket_params=shared_params))

            self.buckets.append(
                Bucket(name=self.bucket_name,
                       num_replicas=self.num_replicas,
                       bucket_size=self.bucket_size,
                       port=STANDARD_BUCKET_PORT + 1,
                       master_id=self.server,
                       type='memcached'))
            for task in tasks:
                task.result()
        else:
            self.log.error('Bucket type not specified')
            return
        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
            self.bucket_name, self.rest),
                        msg='failed to start up bucket with name "{0}"'.format(
                            self.bucket_name))
        if self.bucket_type == "memcached":
            mc = MemcachedClient(self.master.ip, 11210)
            mc.sasl_auth_plain(self.master.rest_username,
                               self.master.rest_password)
            mc.bucket_select(self.bucket_name)
            for i in range(self.num_items):
                key = "key" + str(i)
                try:
                    mc.set(key, 0, 0, "value1")
                except MemcachedError as error:
                    self.fail("error {0} on setting key {1}".format(error, key))
        else:
            gen_load = BlobGenerator('buckettest',
                                     'buckettest-',
                                     self.value_size,
                                     start=0,
                                     end=self.num_items)
            self._load_all_buckets(self.server, gen_load, "create", 0)
        self.cluster.bucket_delete(self.server, self.bucket_name)
        self.assertTrue(
            BucketOperationHelper.wait_for_bucket_deletion(
                self.bucket_name, self.rest, timeout_in_seconds=60),
            msg='bucket "{0}" was not deleted even after waiting for 30 seconds'
            .format(self.bucket_name))
Example #45
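    # Creates bucket_count buckets, asserts they all exist, and optionally runs
    # a timed mixed get/set load against each of them via load_runner.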
    def test_buckets(self, bucket_count=0, ops=0, max_time=0, replicas=[1]):
        # rest and info were undefined in the original snippet; assume the
        # first server in the cluster is the node under test
        rest = RestConnection(self.servers[0])
        info = rest.get_nodes_self()
        bucket_ram = 100
        if not bucket_count:
            bucket_count = info.memoryQuota / bucket_ram
        if bucket_count > info.memoryQuota / bucket_ram:
            self.log.error('node does not have enough capacity for {0} buckets, exiting test'.format(bucket_count))
            return

        max_load_memory = bucket_ram * 3 / 4
        max_load_time = max_time

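        # load_info is the load_runner.LoadRunner configuration: the target
        # servers/bucket, the get/set mix and value-size distribution, and the
        # memory/time limits that end the load.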
        load_info = {
            'server_info': self.servers,
            'memcached_info': {
                'bucket_name': "",
                'bucket_port': "11211",
                'bucket_password': "",
            },
            'operation_info': {
                'operation_distribution': {'set': 3, 'get': 5},
                'valuesize_distribution': {250: 15, 1500: 10, 20: 5, 15000: 5},
                'create_percent': 25,
                'threads': 2 * len(self.servers),
                'operation_rate': ops / bucket_count,
            },
            'limit_info': {
                'max_size': max_load_memory,
                'max_time': max_load_time,
            },
        }

        loads = []

        for i in range(bucket_count):
            bucket_name = 'bucketops-{0}'.format(uuid.uuid4())
            replica = replicas[i % len(replicas)]
            rest.create_bucket(bucket=bucket_name,
                               ramQuotaMB=bucket_ram,
                               replicaNumber=replica,
                               authType='sasl',
                               saslPassword='')
            msg = 'create_bucket succeeded but bucket {0} does not exist'.format(bucket_name)
            self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(bucket_name, rest), msg=msg)
            load_info['memcached_info']['bucket_name'] = bucket_name
            loads.append(load_runner.LoadRunner(load_info, dryrun=False))

        buckets = []
        try:
            buckets = rest.get_buckets()
        except Exception:
            self.log.info('15 seconds sleep before calling get_buckets again...')
            time.sleep(15)
            buckets = rest.get_buckets()
        if len(buckets) != bucket_count:
            msg = 'tried to create {0} buckets, only created {1}'.format(bucket_count, len(buckets))
            self.log.error(msg)
            self.fail(msg)

        if ops:
            self.log.info('starting load against all buckets')
            for load in loads:
                load.start()

            if max_load_time:
                end_time = time.time() + max_load_time
                for load in loads:
                    load.wait(end_time - time.time())
                # stop all load if there is any still running
                for load in loads:
                    load.stop()
            else:
                for load in loads:
                    load.wait()

            self.log.info('stopped load against all buckets')
Example #46
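    # Offline upgrade on Linux: the older build comes from latestbuilds when
    # the version string contains an "r" (e.g. 1.8.0r-55) and from releases
    # otherwise; after the in-place upgrade the buckets and keys are verified.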
    def _install_and_upgrade(
        self, initial_version="1.6.5.3", initialize_cluster=False, create_buckets=False, insert_data=False
    ):
        input = TestInputSingleton.input
        rest_settings = input.membase_settings
        servers = input.servers
        server = servers[0]
        save_upgrade_config = False
        if initial_version.startswith("1.7") and input.test_params["version"].startswith("1.8"):
            save_upgrade_config = True
        is_amazon = False
        if input.test_params.get("amazon", False):
            is_amazon = True
        if initial_version.startswith("1.6") or initial_version.startswith("1.7"):
            product = "membase-server-enterprise"
        else:
            product = "couchbase-server-enterprise"
        remote = RemoteMachineShellConnection(server)
        info = remote.extract_remote_info()
        remote.membase_uninstall()
        remote.couchbase_uninstall()
        builds, changes = BuildQuery().get_all_builds()
        # check to see if we are installing from latestbuilds or releases
        # note: for newer releases (1.8.0) even release versions can have the
        #  form 1.8.0r-55
        if re.search("r", initial_version):
            builds, changes = BuildQuery().get_all_builds()
            older_build = BuildQuery().find_membase_build(
                builds,
                deliverable_type=info.deliverable_type,
                os_architecture=info.architecture_type,
                build_version=initial_version,
                product=product,
                is_amazon=is_amazon,
            )
        else:
            older_build = BuildQuery().find_membase_release_build(
                deliverable_type=info.deliverable_type,
                os_architecture=info.architecture_type,
                build_version=initial_version,
                product=product,
                is_amazon=is_amazon,
            )
        remote.stop_membase()
        remote.stop_couchbase()
        remote.download_build(older_build)
        # now let's install ?
        remote.install_server(older_build)
        rest = RestConnection(server)
        RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
        rest.init_cluster(rest_settings.rest_username, rest_settings.rest_password)
        bucket_data = {}
        if initialize_cluster:
            rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            if create_buckets:
                _create_load_multiple_bucket(self, server, bucket_data, howmany=2)
        version = input.test_params["version"]

        appropriate_build = _get_build(servers[0], version, is_amazon=is_amazon)
        self.assertTrue(appropriate_build.url, msg="unable to find build {0}".format(version))

        remote.download_build(appropriate_build)
        remote.membase_upgrade(appropriate_build, save_upgrade_config=save_upgrade_config)
        remote.disconnect()
        RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)

        pools_info = rest.get_pools_info()

        rest.init_cluster(rest_settings.rest_username, rest_settings.rest_password)
        time.sleep(TIMEOUT_SECS)
        # verify admin_creds still set

        self.assertTrue(pools_info["implementationVersion"], appropriate_build.product_version)
        if initialize_cluster:
            # TODO: how can I verify that the cluster init config is preserved
            if create_buckets:
                self.assertTrue(
                    BucketOperationHelper.wait_for_bucket_creation("bucket-0", rest),
                    msg="bucket 'default' does not exist..",
                )
            if insert_data:
                buckets = rest.get_buckets()
                for bucket in buckets:
                    BucketOperationHelper.keys_exist_or_assert(
                        bucket_data[bucket.name]["inserted_keys"], server, bucket.name, self
                    )
Example #47
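    # A variant of the offline upgrade that installs the older build with
    # membase_install and stops the service through /etc/init.d directly.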
    def _install_and_upgrade(self, initial_version='1.6.5.3',
                             initialize_cluster=False,
                             create_buckets=False,
                             insert_data=False):
        log = logger.Logger.get_logger()
        input = TestInputSingleton.input
        rest_settings = input.membase_settings
        servers = input.servers
        server = servers[0]
        save_upgrade_config = False
        if re.search(r'1\.8', input.test_params['version']):
            save_upgrade_config = True
        is_amazon = False
        if input.test_params.get('amazon', False):
            is_amazon = True
        remote = RemoteMachineShellConnection(server)
        rest = RestConnection(server)
        info = remote.extract_remote_info()
        remote.membase_uninstall()
        remote.couchbase_uninstall()
        builds, changes = BuildQuery().get_all_builds()
        older_build = BuildQuery().find_membase_release_build(deliverable_type=info.deliverable_type,
                                                              os_architecture=info.architecture_type,
                                                              build_version=initial_version,
                                                              product='membase-server-enterprise', is_amazon=is_amazon)
        remote.execute_command('/etc/init.d/membase-server stop')
        remote.download_build(older_build)
        #now let's install ?
        remote.membase_install(older_build)
        RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
        rest.init_cluster_port(rest_settings.rest_username, rest_settings.rest_password)
        bucket_data = {}
        if initialize_cluster:
            rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            if create_buckets:
                _create_load_multiple_bucket(self, server, bucket_data, howmany=2)
        version = input.test_params['version']

        appropriate_build = _get_build(servers[0], version, is_amazon=is_amazon)
        self.assertTrue(appropriate_build.url, msg="unable to find build {0}".format(version))

        remote.download_build(appropriate_build)
        remote.membase_upgrade(appropriate_build, save_upgrade_config=save_upgrade_config)
        remote.disconnect()
        RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)

        pools_info = rest.get_pools_info()

        rest.init_cluster_port(rest_settings.rest_username, rest_settings.rest_password)
        time.sleep(TIMEOUT_SECS)
        #verify admin_creds still set

        self.assertEqual(pools_info['implementationVersion'], appropriate_build.product_version)
        if initialize_cluster:
            # TODO: how can I verify that the cluster init config is preserved
            if create_buckets:
                self.assertTrue(BucketOperationHelper.wait_for_bucket_creation('bucket-0', rest),
                                msg="bucket 'default' does not exist..")
            if insert_data:
                buckets = rest.get_buckets()
                for bucket in buckets:
                    BucketOperationHelper.keys_exist_or_assert(bucket_data[bucket.name]["inserted_keys"],
                                                               server,
                                                               bucket.name, self)
Example #48
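    # Cluster-wide upgrade with two strategies: offline (stop every node,
    # upgrade them all, restart) or rolling (rebalance each node out, reinstall
    # it with the new build, rebalance it back in), then verify the data.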
    def _install_and_upgrade(self, initial_version='1.6.5.3',
                             create_buckets=False,
                             insert_data=False,
                             start_upgraded_first=True,
                             load_ratio=-1,
                             roll_upgrade=False,
                             upgrade_path=[]):
        node_upgrade_path = []
        node_upgrade_path.extend(upgrade_path)
        #then start them in whatever order you want
        inserted_keys = []
        log = logger.Logger.get_logger()
        if roll_upgrade:
            log.info("performing a rolling upgrade")
        input = TestInputSingleton.input
        rest_settings = input.membase_settings
        servers = input.servers
        save_upgrade_config = False
        is_amazon = False
        if input.test_params.get('amazon', False):
            is_amazon = True
        # install older build on all nodes
        for server in servers:
            remote = RemoteMachineShellConnection(server)
            rest = RestConnection(server)
            info = remote.extract_remote_info()
            older_build = BuildQuery().find_membase_release_build(deliverable_type=info.deliverable_type,
                                                              os_architecture=info.architecture_type,
                                                              build_version=initial_version,
                                                              product='membase-server-enterprise', is_amazon=is_amazon)

            remote.membase_uninstall()
            remote.couchbase_uninstall()
            remote.execute_command('/etc/init.d/membase-server stop')
            remote.download_build(older_build)
            #now let's install ?
            remote.membase_install(older_build)
            RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
            rest.init_cluster_port(rest_settings.rest_username, rest_settings.rest_password)
            rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            remote.disconnect()

        bucket_data = {}
        master = servers[0]
        if create_buckets:
            #let's create buckets
            #wait for the bucket
            #bucket port should also be configurable , pass it as the
            #parameter to this test ? later

            self._create_default_bucket(master)
            inserted_keys = self._load_data(master, load_ratio)
            _create_load_multiple_bucket(self, master, bucket_data, howmany=2)

        # cluster all the nodes together
        ClusterOperationHelper.add_all_nodes_or_assert(master,
                                                       servers,
                                                       rest_settings, self)
        rest = RestConnection(master)
        nodes = rest.node_statuses()
        otpNodeIds = []
        for node in nodes:
            otpNodeIds.append(node.id)
        rebalanceStarted = rest.rebalance(otpNodeIds, [])
        self.assertTrue(rebalanceStarted,
                        "unable to start rebalance on master node {0}".format(master.ip))
        log.info('started rebalance operation on master node {0}'.format(master.ip))
        rebalanceSucceeded = rest.monitorRebalance()
        self.assertTrue(rebalanceSucceeded,
                        "rebalance operation for nodes: {0} was not successful".format(otpNodeIds))

        if initial_version == "1.7.0" or initial_version == "1.7.1":
            self._save_config(rest_settings, master)

        input_version = input.test_params['version']
        node_upgrade_path.append(input_version)
        # if we don't want to do roll_upgrade?
        log.info("Upgrade path: {0} -> {1}".format(initial_version, node_upgrade_path))
        log.info("List of servers {0}".format(servers))
        if not roll_upgrade:
            for version in node_upgrade_path:
                # compare versions by value; "is not" tests identity and can
                # misfire for equal strings
                if version != initial_version:
                    log.info("Upgrading to version {0}".format(version))
                    self._stop_membase_servers(servers)
                    if re.search('1.8', version):
                        save_upgrade_config = True

                    appropriate_build = _get_build(servers[0], version, is_amazon=is_amazon)
                    self.assertTrue(appropriate_build.url, msg="unable to find build {0}".format(version))
                    for server in servers:
                        remote = RemoteMachineShellConnection(server)
                        remote.download_build(appropriate_build)
                        remote.membase_upgrade(appropriate_build, save_upgrade_config=save_upgrade_config)
                        RestHelper(RestConnection(server)).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)

                        #verify admin_creds still set
                        pools_info = RestConnection(server).get_pools_info()
                        self.assertEqual(pools_info['implementationVersion'], appropriate_build.product_version)

                        if start_upgraded_first:
                            log.info("Starting server {0} post upgrade".format(server))
                            remote.start_membase()
                        else:
                            remote.stop_membase()

                        remote.disconnect()
                    if not start_upgraded_first:
                        log.info("Starting all servers together")
                        self._start_membase_servers(servers)
                    time.sleep(TIMEOUT_SECS)
                    if version == "1.7.0" or version == "1.7.1":
                        self._save_config(rest_settings, master)

                    if create_buckets:
                        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation('default', RestConnection(master)),
                                        msg="bucket 'default' does not exist..")
                    if insert_data:
                        self._verify_data(master, rest, inserted_keys)

        # rolling upgrade
        else:
            version = input.test_params['version']
            appropriate_build = _get_build(servers[0], version, is_amazon=is_amazon)
            self.assertTrue(appropriate_build.url, msg="unable to find build {0}".format(version))
            # rebalance node out
            # remove membase from node
            # install destination version onto node
            # rebalance it back into the cluster
            for server_index in range(len(servers)):
                server = servers[server_index]
                master = servers[server_index - 1]
                log.info("current master is {0}, rolling node is {1}".format(master, server))

                rest = RestConnection(master)
                nodes = rest.node_statuses()
                allNodes = []
                toBeEjectedNodes = []
                for node in nodes:
                    allNodes.append(node.id)
                    if "{0}:{1}".format(node.ip, node.port) == "{0}:{1}".format(server.ip, server.port):
                        toBeEjectedNodes.append(node.id)
                helper = RestHelper(rest)
                removed = helper.remove_nodes(knownNodes=allNodes, ejectedNodes=toBeEjectedNodes)
                self.assertTrue(removed, msg="Unable to remove nodes {0}".format(toBeEjectedNodes))
                remote = RemoteMachineShellConnection(server)
                remote.membase_uninstall()
                remote.couchbase_uninstall()
                remote.download_build(appropriate_build)
                remote.membase_install(appropriate_build)
                RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
                log.info("sleep for 10 seconds to wait for membase-server to start...")
                time.sleep(TIMEOUT_SECS)
                rest.init_cluster_port(rest_settings.rest_username, rest_settings.rest_password)
                rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
                remote.disconnect()

                #readd this to the cluster
                ClusterOperationHelper.add_all_nodes_or_assert(master, [server], rest_settings, self)
                nodes = rest.node_statuses()
                otpNodeIds = []
                for node in nodes:
                    otpNodeIds.append(node.id)
                rebalanceStarted = rest.rebalance(otpNodeIds, [])
                self.assertTrue(rebalanceStarted,
                                "unable to start rebalance on master node {0}".format(master.ip))
                log.info('started rebalance operation on master node {0}'.format(master.ip))
                rebalanceSucceeded = rest.monitorRebalance()
                self.assertTrue(rebalanceSucceeded,
                                "rebalance operation for nodes: {0} was not successful".format(otpNodeIds))
                #ClusterOperationHelper.verify_persistence(servers, self)

            # TODO: how can I verify that the cluster init config is preserved
            # verify data on upgraded nodes
            if create_buckets:
                self.assertTrue(BucketOperationHelper.wait_for_bucket_creation('default', RestConnection(master)),
                                msg="bucket 'default' does not exist..")
            if insert_data:
                self._verify_data(master, rest, inserted_keys)
                rest = RestConnection(master)
                buckets = rest.get_buckets()
                for bucket in buckets:
                    BucketOperationHelper.keys_exist_or_assert(bucket_data[bucket.name]["inserted_keys"],
                                                               master,
                                                               bucket.name, self)
Example #49
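    # Windows flavor of the cluster-wide offline/rolling upgrade: binaries are
    # fetched with download_binary_in_win ("mb" for membase, "cb" for
    # couchbase) and upgraded with membase_upgrade_win.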
    def _install_and_upgrade(self,
                             initial_version='1.6.5.3',
                             create_buckets=False,
                             insert_data=False,
                             start_upgraded_first=True,
                             load_ratio=-1,
                             roll_upgrade=False,
                             upgrade_path=[]):
        node_upgrade_path = []
        node_upgrade_path.extend(upgrade_path)
        #then start them in whatever order you want
        inserted_keys = []
        log = logger.Logger.get_logger()
        if roll_upgrade:
            log.info("performing a rolling upgrade")
        input = TestInputSingleton.input
        input_version = input.test_params['version']
        rest_settings = input.membase_settings
        servers = input.servers
        is_amazon = False
        if input.test_params.get('amazon', False):
            is_amazon = True

        # install older build on all nodes
        for server in servers:
            remote = RemoteMachineShellConnection(server)
            rest = RestConnection(server)
            info = remote.extract_remote_info()
            older_build = BuildQuery().find_membase_release_build(
                deliverable_type=info.deliverable_type,
                os_architecture=info.architecture_type,
                build_version=initial_version,
                product='membase-server-enterprise',
                is_amazon=is_amazon)

            remote.membase_uninstall()
            remote.couchbase_uninstall()
            if older_build.product_version.startswith("1.8"):
                abbr_product = "cb"
            else:
                abbr_product = "mb"
            remote.download_binary_in_win(older_build.url, abbr_product,
                                          initial_version)
            #now let's install ?
            remote.install_server_win(older_build, initial_version)
            RestHelper(rest).is_ns_server_running(
                testconstants.NS_SERVER_TIMEOUT)
            rest.init_cluster(rest_settings.rest_username,
                              rest_settings.rest_password)
            rest.init_cluster_memoryQuota(
                memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            remote.disconnect()

        bucket_data = {}
        master = servers[0]
        # cluster all the nodes together
        ClusterOperationHelper.add_all_nodes_or_assert(master, servers,
                                                       rest_settings, self)
        rest = RestConnection(master)
        nodes = rest.node_statuses()
        otpNodeIds = []
        for node in nodes:
            otpNodeIds.append(node.id)
        rebalanceStarted = rest.rebalance(otpNodeIds, [])
        self.assertTrue(
            rebalanceStarted,
            "unable to start rebalance on master node {0}".format(master.ip))
        log.info('started rebalance operation on master node {0}'.format(
            master.ip))
        rebalanceSucceeded = rest.monitorRebalance()
        self.assertTrue(
            rebalanceSucceeded,
            "rebalance operation for nodes: {0} was not successful".format(
                otpNodeIds))

        if create_buckets:
            #let's create buckets
            #wait for the bucket
            #bucket port should also be configurable , pass it as the
            #parameter to this test ? later

            self._create_default_bucket(master)
            inserted_keys = self._load_data(master, load_ratio)
            _create_load_multiple_bucket(self, master, bucket_data, howmany=2)

        #if initial_version == "1.7.0" or initial_version == "1.7.1":
        #   self._save_config(rest_settings, master)

        node_upgrade_path.append(input_version)
        # if we don't want to do roll_upgrade?
        log.info("Upgrade path: {0} -> {1}".format(initial_version,
                                                   node_upgrade_path))
        log.info("List of servers {0}".format(servers))
        if not roll_upgrade:
            for version in node_upgrade_path:
                if version != initial_version:
                    log.info(
                        "SHUTDOWN ALL CB OR MB SERVERS IN CLUSTER BEFORE DOING UPGRADE"
                    )
                    for server in servers:
                        shell = RemoteMachineShellConnection(server)
                        shell.stop_membase()
                        shell.disconnect()
                    log.info("Upgrading to version {0}".format(version))
                    appropriate_build = _get_build(servers[0],
                                                   version,
                                                   is_amazon=is_amazon)
                    self.assertTrue(
                        appropriate_build.url,
                        msg="unable to find build {0}".format(version))
                    for server in servers:
                        remote = RemoteMachineShellConnection(server)
                        if version.startswith("1.8"):
                            abbr_product = "cb"
                        remote.download_binary_in_win(appropriate_build.url,
                                                      abbr_product, version)
                        log.info("###### START UPGRADE. #########")
                        remote.membase_upgrade_win(info.architecture_type,
                                                   info.windows_name, version,
                                                   initial_version)
                        RestHelper(
                            RestConnection(server)).is_ns_server_running(
                                testconstants.NS_SERVER_TIMEOUT)

                        #verify admin_creds still set
                        pools_info = RestConnection(server).get_pools_info()
                        self.assertEqual(pools_info['implementationVersion'],
                                         appropriate_build.product_version)

                        if not start_upgraded_first:
                            remote.stop_membase()

                        remote.disconnect()
                    if not start_upgraded_first:
                        log.info("Starting all servers together")
                        self._start_membase_servers(servers)
                    time.sleep(TIMEOUT_SECS)

                    if create_buckets:
                        self.assertTrue(
                            BucketOperationHelper.wait_for_bucket_creation(
                                'default', RestConnection(master)),
                            msg="bucket 'default' does not exist..")
                    if insert_data:
                        self._verify_data(master, rest, inserted_keys)

        # rolling upgrade
        else:
            version = input.test_params['version']
            if version.startswith("1.8"):
                abbr_product = "cb"
            appropriate_build = _get_build(servers[0],
                                           version,
                                           is_amazon=is_amazon)
            self.assertTrue(appropriate_build.url,
                            msg="unable to find build {0}".format(version))
            # rebalance node out
            # remove membase from node
            # install destination version onto node
            # rebalance it back into the cluster
            for server_index in range(len(servers)):
                server = servers[server_index]
                master = servers[server_index - 1]
                log.info("current master is {0}, rolling node is {1}".format(
                    master, server))

                rest = RestConnection(master)
                nodes = rest.node_statuses()
                allNodes = []
                toBeEjectedNodes = []
                for node in nodes:
                    allNodes.append(node.id)
                    if "{0}:{1}".format(node.ip,
                                        node.port) == "{0}:{1}".format(
                                            server.ip, server.port):
                        toBeEjectedNodes.append(node.id)
                helper = RestHelper(rest)
                removed = helper.remove_nodes(knownNodes=allNodes,
                                              ejectedNodes=toBeEjectedNodes)
                self.assertTrue(
                    removed,
                    msg="Unable to remove nodes {0}".format(toBeEjectedNodes))
                remote = RemoteMachineShellConnection(server)
                remote.membase_uninstall()
                remote.couchbase_uninstall()
                if appropriate_build.product == 'membase-server-enterprise':
                    abbr_product = "mb"
                else:
                    abbr_product = "cb"
                remote.download_binary_in_win(appropriate_build.url,
                                              abbr_product, version)
                remote.install_server_win(appropriate_build, version)
                RestHelper(rest).is_ns_server_running(
                    testconstants.NS_SERVER_TIMEOUT)
                time.sleep(TIMEOUT_SECS)
                rest.init_cluster(rest_settings.rest_username,
                                  rest_settings.rest_password)
                rest.init_cluster_memoryQuota(
                    memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
                remote.disconnect()

                #readd this to the cluster
                ClusterOperationHelper.add_all_nodes_or_assert(
                    master, [server], rest_settings, self)
                nodes = rest.node_statuses()
                log.info(
                    "wait 30 seconds before asking the older node to start rebalance"
                )
                time.sleep(30)
                otpNodeIds = []
                for node in nodes:
                    otpNodeIds.append(node.id)
                rebalanceStarted = rest.rebalance(otpNodeIds, [])
                self.assertTrue(
                    rebalanceStarted,
                    "unable to start rebalance on master node {0}".format(
                        master.ip))
                log.info(
                    'started rebalance operation on master node {0}'.format(
                        master.ip))
                rebalanceSucceeded = rest.monitorRebalance()
                self.assertTrue(
                    rebalanceSucceeded,
                    "rebalance operation for nodes: {0} was not successful".
                    format(otpNodeIds))
                #ClusterOperationHelper.verify_persistence(servers, self)

            # TODO: how can I verify that the cluster init config is preserved
            # verify data on upgraded nodes
            if create_buckets:
                self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
                    'default', RestConnection(master)),
                                msg="bucket 'default' does not exist..")
            if insert_data:
                self._verify_data(master, rest, inserted_keys)
                rest = RestConnection(master)
                buckets = rest.get_buckets()
                for bucket in buckets:
                    BucketOperationHelper.keys_exist_or_assert(
                        bucket_data[bucket.name]["inserted_keys"], master,
                        bucket.name, self)
Example #50
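    # Refined cluster-wide upgrade: the upgrade config is only preserved on
    # 1.7 -> 1.8 hops (not 1.8.0 -> 1.8.1), and do_new_rest (MB-5108) issues
    # the rebalance through the newly added node instead of the old master.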
    def _install_and_upgrade(self,
                             initial_version='1.6.5.3',
                             create_buckets=False,
                             insert_data=False,
                             start_upgraded_first=True,
                             load_ratio=-1,
                             roll_upgrade=False,
                             upgrade_path=[],
                             do_new_rest=False):
        node_upgrade_path = []
        node_upgrade_path.extend(upgrade_path)
        #then start them in whatever order you want
        inserted_keys = []
        log = logger.Logger.get_logger()
        if roll_upgrade:
            log.info("performing an online upgrade")
        input = TestInputSingleton.input
        rest_settings = input.membase_settings
        servers = input.servers
        save_upgrade_config = False
        is_amazon = False
        if input.test_params.get('amazon', False):
            is_amazon = True
        if initial_version.startswith("1.6") or initial_version.startswith(
                "1.7"):
            product = 'membase-server-enterprise'
        else:
            product = 'couchbase-server-enterprise'
        # install older build on all nodes
        for server in servers:
            remote = RemoteMachineShellConnection(server)
            rest = RestConnection(server)
            info = remote.extract_remote_info()
            # check to see if we are installing from latestbuilds or releases
            # note: for newer releases (1.8.0) even release versions can have the
            #  form 1.8.0r-55
            if re.search('r', initial_version):
                builds, changes = BuildQuery().get_all_builds()
                older_build = BuildQuery().find_membase_build(
                    builds,
                    deliverable_type=info.deliverable_type,
                    os_architecture=info.architecture_type,
                    build_version=initial_version,
                    product=product,
                    is_amazon=is_amazon)

            else:
                older_build = BuildQuery().find_membase_release_build(
                    deliverable_type=info.deliverable_type,
                    os_architecture=info.architecture_type,
                    build_version=initial_version,
                    product=product,
                    is_amazon=is_amazon)

            remote.membase_uninstall()
            remote.couchbase_uninstall()
            remote.stop_membase()
            remote.stop_couchbase()
            remote.download_build(older_build)
            #now let's install ?
            remote.membase_install(older_build)
            RestHelper(rest).is_ns_server_running(
                testconstants.NS_SERVER_TIMEOUT)
            rest.init_cluster_port(rest_settings.rest_username,
                                   rest_settings.rest_password)
            rest.init_cluster_memoryQuota(
                memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            remote.disconnect()

        bucket_data = {}
        master = servers[0]
        if create_buckets:
            #let's create buckets
            #wait for the bucket
            #bucket port should also be configurable , pass it as the
            #parameter to this test ? later

            self._create_default_bucket(master)
            inserted_keys = self._load_data(master, load_ratio)
            _create_load_multiple_bucket(self, master, bucket_data, howmany=2)

        # cluster all the nodes together
        ClusterOperationHelper.add_all_nodes_or_assert(master, servers,
                                                       rest_settings, self)
        rest = RestConnection(master)
        nodes = rest.node_statuses()
        otpNodeIds = []
        for node in nodes:
            otpNodeIds.append(node.id)
        rebalanceStarted = rest.rebalance(otpNodeIds, [])
        self.assertTrue(
            rebalanceStarted,
            "unable to start rebalance on master node {0}".format(master.ip))
        log.info('started rebalance operation on master node {0}'.format(
            master.ip))
        rebalanceSucceeded = rest.monitorRebalance()
        self.assertTrue(
            rebalanceSucceeded,
            "rebalance operation for nodes: {0} was not successful".format(
                otpNodeIds))

        if initial_version == "1.7.0" or initial_version == "1.7.1":
            self._save_config(rest_settings, master)

        input_version = input.test_params['version']
        node_upgrade_path.append(input_version)
        current_version = initial_version
        previous_version = current_version
        # if we don't want to do roll_upgrade?
        log.info("Upgrade path: {0} -> {1}".format(initial_version,
                                                   node_upgrade_path))
        log.info("List of servers {0}".format(servers))
        if not roll_upgrade:
            for version in node_upgrade_path:
                previous_version = current_version
                current_version = version
                if version != initial_version:
                    log.info("Upgrading to version {0}".format(version))
                    self._stop_membase_servers(servers)
                    if previous_version.startswith(
                            "1.7") and current_version.startswith("1.8"):
                        save_upgrade_config = True
                    # No need to save the upgrade config from 180 to 181
                    if previous_version.startswith(
                            "1.8.0") and current_version.startswith("1.8.1"):
                        save_upgrade_config = False
                    appropriate_build = _get_build(servers[0],
                                                   version,
                                                   is_amazon=is_amazon)
                    self.assertTrue(
                        appropriate_build.url,
                        msg="unable to find build {0}".format(version))
                    for server in servers:
                        remote = RemoteMachineShellConnection(server)
                        remote.download_build(appropriate_build)
                        remote.membase_upgrade(
                            appropriate_build,
                            save_upgrade_config=save_upgrade_config)
                        RestHelper(
                            RestConnection(server)).is_ns_server_running(
                                testconstants.NS_SERVER_TIMEOUT)

                        #verify admin_creds still set
                        pools_info = RestConnection(server).get_pools_info()
                        self.assertEqual(pools_info['implementationVersion'],
                                         appropriate_build.product_version)

                        if start_upgraded_first:
                            log.info("Starting server {0} post upgrade".format(
                                server))
                            remote.start_membase()
                        else:
                            remote.stop_membase()

                        remote.disconnect()
                    if not start_upgraded_first:
                        log.info("Starting all servers together")
                        self._start_membase_servers(servers)
                    time.sleep(TIMEOUT_SECS)
                    if version == "1.7.0" or version == "1.7.1":
                        self._save_config(rest_settings, master)

                    if create_buckets:
                        self.assertTrue(
                            BucketOperationHelper.wait_for_bucket_creation(
                                'default', RestConnection(master)),
                            msg="bucket 'default' does not exist..")
                    if insert_data:
                        self._verify_data(master, rest, inserted_keys)

        # rolling upgrade
        else:
            version = input.test_params['version']
            appropriate_build = _get_build(servers[0],
                                           version,
                                           is_amazon=is_amazon)
            self.assertTrue(appropriate_build.url,
                            msg="unable to find build {0}".format(version))
            # rebalance node out
            # remove membase from node
            # install destination version onto node
            # rebalance it back into the cluster
            for server_index in range(len(servers)):
                server = servers[server_index]
                master = servers[server_index - 1]
                log.info("current master is {0}, rolling node is {1}".format(
                    master, server))

                rest = RestConnection(master)
                nodes = rest.node_statuses()
                allNodes = []
                toBeEjectedNodes = []
                for node in nodes:
                    allNodes.append(node.id)
                    if "{0}:{1}".format(node.ip,
                                        node.port) == "{0}:{1}".format(
                                            server.ip, server.port):
                        toBeEjectedNodes.append(node.id)
                helper = RestHelper(rest)
                removed = helper.remove_nodes(knownNodes=allNodes,
                                              ejectedNodes=toBeEjectedNodes)
                self.assertTrue(
                    removed,
                    msg="Unable to remove nodes {0}".format(toBeEjectedNodes))
                remote = RemoteMachineShellConnection(server)
                remote.download_build(appropriate_build)
                # if initial version is 180
                # Don't uninstall the server
                if not initial_version.startswith('1.8.0'):
                    remote.membase_uninstall()
                    remote.couchbase_uninstall()
                    remote.membase_install(appropriate_build)
                else:
                    remote.membase_upgrade(appropriate_build)

                RestHelper(rest).is_ns_server_running(
                    testconstants.NS_SERVER_TIMEOUT)
                log.info(
                    "sleep for {0} seconds to wait for membase-server to start..."
                    .format(TIMEOUT_SECS))
                time.sleep(TIMEOUT_SECS)
                rest.init_cluster_port(rest_settings.rest_username,
                                       rest_settings.rest_password)
                rest.init_cluster_memoryQuota(
                    memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
                remote.disconnect()

                #readd this to the cluster
                ClusterOperationHelper.add_all_nodes_or_assert(
                    master, [server], rest_settings, self)
                nodes = rest.node_statuses()
                otpNodeIds = []
                for node in nodes:
                    otpNodeIds.append(node.id)
                # Issue rest call to the newly added node
                # MB-5108
                if do_new_rest:
                    master = server
                    rest = RestConnection(master)
                rebalanceStarted = rest.rebalance(otpNodeIds, [])
                self.assertTrue(
                    rebalanceStarted,
                    "unable to start rebalance on master node {0}".format(
                        master.ip))
                log.info(
                    'started rebalance operation on master node {0}'.format(
                        master.ip))
                rebalanceSucceeded = rest.monitorRebalance()
                self.assertTrue(
                    rebalanceSucceeded,
                    "rebalance operation for nodes: {0} was not successful".
                    format(otpNodeIds))

            # TODO: how can I verify that the cluster init config is preserved
            # verify data on upgraded nodes
            if create_buckets:
                self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
                    'default', RestConnection(master)),
                                msg="bucket 'default' does not exist..")
            if insert_data:
                self._verify_data(master, rest, inserted_keys)
                rest = RestConnection(master)
                buckets = rest.get_buckets()
                for bucket in buckets:
                    BucketOperationHelper.keys_exist_or_assert(
                        bucket_data[bucket.name]["inserted_keys"], master,
                        bucket.name, self)
Example #51
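    # Single-node offline upgrade that picks the package from the initial
    # version: membase-server-enterprise for 1.6/1.7,
    # couchbase-server-enterprise otherwise.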
    def _install_and_upgrade(self,
                             initial_version='1.6.5.3',
                             initialize_cluster=False,
                             create_buckets=False,
                             insert_data=False):
        input = TestInputSingleton.input
        rest_settings = input.membase_settings
        servers = input.servers
        server = servers[0]
        save_upgrade_config = False
        if initial_version.startswith(
                "1.7") and input.test_params['version'].startswith("1.8"):
            save_upgrade_config = True
        is_amazon = False
        if input.test_params.get('amazon', False):
            is_amazon = True
        if initial_version.startswith("1.6") or initial_version.startswith(
                "1.7"):
            product = 'membase-server-enterprise'
        else:
            product = 'couchbase-server-enterprise'
        remote = RemoteMachineShellConnection(server)
        rest = RestConnection(server)
        info = remote.extract_remote_info()
        remote.membase_uninstall()
        remote.couchbase_uninstall()
        builds, changes = BuildQuery().get_all_builds()
        # check to see if we are installing from latestbuilds or releases
        # note: for newer releases (1.8.0) even release versions can have the
        #  form 1.8.0r-55
        if re.search('r', initial_version):
            builds, changes = BuildQuery().get_all_builds()
            older_build = BuildQuery().find_membase_build(
                builds,
                deliverable_type=info.deliverable_type,
                os_architecture=info.architecture_type,
                build_version=initial_version,
                product=product,
                is_amazon=is_amazon)
        else:
            older_build = BuildQuery().find_membase_release_build(
                deliverable_type=info.deliverable_type,
                os_architecture=info.architecture_type,
                build_version=initial_version,
                product=product,
                is_amazon=is_amazon)
        remote.stop_membase()
        remote.stop_couchbase()
        remote.download_build(older_build)
        #now let's install ?
        remote.membase_install(older_build)
        RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
        rest.init_cluster_port(rest_settings.rest_username,
                               rest_settings.rest_password)
        bucket_data = {}
        if initialize_cluster:
            rest.init_cluster_memoryQuota(
                memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
            if create_buckets:
                _create_load_multiple_bucket(self,
                                             server,
                                             bucket_data,
                                             howmany=2)
        version = input.test_params['version']

        appropriate_build = _get_build(servers[0],
                                       version,
                                       is_amazon=is_amazon)
        self.assertTrue(appropriate_build.url,
                        msg="unable to find build {0}".format(version))

        remote.download_build(appropriate_build)
        remote.membase_upgrade(appropriate_build,
                               save_upgrade_config=save_upgrade_config)
        remote.disconnect()
        RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)

        pools_info = rest.get_pools_info()

        rest.init_cluster_port(rest_settings.rest_username,
                               rest_settings.rest_password)
        time.sleep(TIMEOUT_SECS)
        #verify admin_creds still set

        self.assertEqual(pools_info['implementationVersion'],
                         appropriate_build.product_version)
        if initialize_cluster:
            # TODO: how can I verify that the cluster init config is preserved
            if create_buckets:
                self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
                    'bucket-0', rest),
                                msg="bucket 'default' does not exist..")
            if insert_data:
                buckets = rest.get_buckets()
                for bucket in buckets:
                    BucketOperationHelper.keys_exist_or_assert(
                        bucket_data[bucket.name]["inserted_keys"], server,
                        bucket.name, self)
Example #52
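    # Windows-only single-node offline upgrade; the membase_install_win /
    # init_cluster_port counterpart of Example #43.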
    def _install_and_upgrade(self, initial_version='1.6.5.3',
                             initialize_cluster=False,
                             create_buckets=False,
                             insert_data=False):
        log = logger.Logger.get_logger()
        input = TestInputSingleton.input
        version = input.test_params['version']
        rest_settings = input.membase_settings
        servers = input.servers
        server = servers[0]
        is_amazon = False
        if input.test_params.get('amazon', False):
            is_amazon = True
        remote = RemoteMachineShellConnection(server)
        rest = RestConnection(server)
        info = remote.extract_remote_info()
        remote.membase_uninstall()
        remote.couchbase_uninstall()
        builds, changes = BuildQuery().get_all_builds()
        #release_builds = BuildQuery().get_all_release_builds(initial_version)

        #if initial_version == "1.7.2":
         #   initial_version = "1.7.2r-20"
        older_build = BuildQuery().find_membase_release_build(deliverable_type=info.deliverable_type,
                                                              os_architecture=info.architecture_type,
                                                              build_version=initial_version,
                                                              product='membase-server-enterprise', is_amazon=is_amazon)
        if info.type.lower() == 'windows':
            if older_build.product_version.startswith("1.8"):
                abbr_product = "cb"
            else:
                abbr_product = "mb"
            remote.download_binary_in_win(older_build.url, abbr_product, initial_version)
            remote.membase_install_win(older_build, initial_version)
            RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
            rest.init_cluster_port(rest_settings.rest_username, rest_settings.rest_password)
            bucket_data = {}
            if initialize_cluster:
                rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
                if create_buckets:
                    _create_load_multiple_bucket(self, server, bucket_data, howmany=2)
            if version.startswith("1.8"):
                abbr_product = "cb"
            appropriate_build = _get_build(servers[0], version, is_amazon=is_amazon)
            self.assertTrue(appropriate_build.url, msg="unable to find build {0}".format(version))
            remote.download_binary_in_win(appropriate_build.url, abbr_product, version)
            remote.stop_membase()
            log.info("###### START UPGRADE. #########")
            remote.membase_upgrade_win(info.architecture_type, info.windows_name, version, initial_version)
            remote.disconnect()
            RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)

            pools_info = rest.get_pools_info()

            rest.init_cluster_port(rest_settings.rest_username, rest_settings.rest_password)
            time.sleep(TIMEOUT_SECS)
            # verify admin_creds still set

            self.assertEqual(pools_info['implementationVersion'], appropriate_build.product_version)
            if initialize_cluster:
                # TODO: how can I verify that the cluster init config is preserved
                if create_buckets:
                    self.assertTrue(BucketOperationHelper.wait_for_bucket_creation('bucket-0', rest),
                                    msg="bucket 'default' does not exist..")
                if insert_data:
                    buckets = rest.get_buckets()
                    for bucket in buckets:
                        BucketOperationHelper.keys_exist_or_assert(bucket_data[bucket.name]["inserted_keys"],
                                                                   server,
                                                                   bucket.name, self)
        else:
            log.error("This is not windows server!")