Code Example #1
    def rebalance_in_with_bucket_password_change(self):
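        # Rebalance in one batch of nodes, change a SASL bucket's password
        # once that rebalance completes, then rebalance in a second batch and
        # verify unacked bytes across all buckets.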
        if self.sasl_buckets == 0:
            self.fail("no sasl buckets are specified!")
        new_pass = self.input.param("new_pass", "new_pass")
        servs_in = self.servers[self.nodes_init:self.nodes_init +
                                self.nodes_in]
        nodes_in_second = self.input.param("nodes_in_second", 1)
        servs_in_second = self.servers[self.nodes_init +
                                       self.nodes_in:self.nodes_init +
                                       self.nodes_in + nodes_in_second]
        servs_init = self.servers[:self.nodes_init]
        servs_result = self.servers[:self.nodes_init + self.nodes_in]

        rebalance = self.cluster.async_rebalance(servs_init, servs_in, [])
        rebalance.result()
        rest = RestConnection(self.master)
        bucket_to_change = [
            bucket for bucket in self.buckets
            if bucket.authType == 'sasl' and bucket.name != 'default'
        ][0]
        rest.change_bucket_props(bucket_to_change, saslPassword=new_pass)
        rebalance = self.cluster.async_rebalance(servs_result, servs_in_second,
                                                 [])
        rebalance.result()
        self.verify_unacked_bytes_all_buckets()
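Parameters such as new_pass and nodes_in_second are read through self.input.param(name, default), so each run can override them; the second argument is the fallback used when the key is absent from the test configuration.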
Code Example #2
    def test_bucketEvents(self):
        ops = self.input.param("ops", None)
        user = self.master.rest_username
        source = 'ns_server'
        rest = RestConnection(self.master)

        if "ip6" in self.master.ip or self.master.ip.startswith("["):
            self.ipAddress = self.getLocalIPV6Address()

        if ops == 'create':
            # ram_quota is expressed in bytes; the REST calls below take MB,
            # hence the // 1048576 conversions.
            expectedResults = {'bucket_name': 'TestBucket', 'ram_quota': 104857600, 'num_replicas': 1,
                               'replica_index': False, 'eviction_policy': 'value_only', 'type': 'membase',
                               'auth_type': 'sasl', 'autocompaction': 'false', 'purge_interval': 'undefined',
                               'flush_enabled': False, 'num_threads': 3, 'source': source,
                               'user': user, 'ip': self.ipAddress, 'port': 57457, 'sessionid': '',
                               'conflict_resolution_type': 'seqno', 'storage_mode': 'couchstore',
                               'max_ttl': 400, 'compression_mode': 'passive'}
            rest.create_bucket(expectedResults['bucket_name'], expectedResults['ram_quota'] // 1048576,
                               expectedResults['auth_type'], 'password', expectedResults['num_replicas'],
                               '11211', 'membase', 0, expectedResults['num_threads'], 0, 'valueOnly',
                               maxTTL=expectedResults['max_ttl'])

        elif ops == 'update':
            expectedResults = {'bucket_name': 'TestBucket', 'ram_quota': 209715200, 'num_replicas': 1,
                               'replica_index': False, 'eviction_policy': 'value_only', 'type': 'membase',
                               'auth_type': 'sasl', 'autocompaction': 'false', 'purge_interval': 'undefined',
                               'flush_enabled': 'true', 'num_threads': 3, 'source': source,
                               'user': user, 'ip': self.ipAddress, 'port': 57457, 'sessionid': '',
                               'storage_mode': 'couchstore', 'max_ttl': 400}
            rest.create_bucket(expectedResults['bucket_name'], expectedResults['ram_quota'] // 1048576,
                               expectedResults['auth_type'], 'password', expectedResults['num_replicas'],
                               '11211', 'membase', 0, expectedResults['num_threads'], 0, 'valueOnly',
                               maxTTL=expectedResults['max_ttl'])
            expectedResults = {'bucket_name': 'TestBucket', 'ram_quota': 104857600, 'num_replicas': 1,
                               'replica_index': True, 'eviction_policy': 'value_only', 'type': 'membase',
                               'auth_type': 'sasl', 'autocompaction': 'false', 'purge_interval': 'undefined',
                               'flush_enabled': True, 'num_threads': 3, 'source': source,
                               'user': user, 'ip': self.ipAddress, 'port': 57457,
                               'storage_mode': 'couchstore', 'max_ttl': 200}
            rest.change_bucket_props(expectedResults['bucket_name'], expectedResults['ram_quota'] // 1048576,
                                     expectedResults['auth_type'], 'password', expectedResults['num_replicas'],
                                     '11211', 1, 1, maxTTL=expectedResults['max_ttl'])

        elif ops == 'delete':
            expectedResults = {'bucket_name': 'TestBucket', 'ram_quota': 104857600, 'num_replicas': 1,
                               'replica_index': True, 'eviction_policy': 'value_only', 'type': 'membase',
                               'auth_type': 'sasl', 'autocompaction': 'false', 'purge_interval': 'undefined',
                               'flush_enabled': False, 'num_threads': 3, 'source': source,
                               'user': user, 'ip': self.ipAddress, 'port': 57457}
            rest.create_bucket(expectedResults['bucket_name'], expectedResults['ram_quota'] // 1048576,
                               expectedResults['auth_type'], 'password', expectedResults['num_replicas'],
                               '11211', 'membase', 1, expectedResults['num_threads'], 0, 'valueOnly')
            rest.delete_bucket(expectedResults['bucket_name'])

        elif ops == 'flush':
            expectedResults = {'bucket_name': 'TestBucket', 'ram_quota': 100, 'num_replicas': 1,
                               'replica_index': True, 'eviction_policy': 'value_only', 'type': 'membase',
                               'auth_type': 'sasl', 'autocompaction': 'false', 'purge_interval': 'undefined',
                               'flush_enabled': True, 'num_threads': 3, 'source': source,
                               'user': user, 'ip': self.ipAddress, 'port': 57457,
                               'storage_mode': 'couchstore'}
            rest.create_bucket(expectedResults['bucket_name'], expectedResults['ram_quota'],
                               expectedResults['auth_type'], 'password', expectedResults['num_replicas'],
                               '11211', 'membase', 1, expectedResults['num_threads'], 1, 'valueOnly')
            self.sleep(10)
            rest.flush_bucket(expectedResults['bucket_name'])

        self.checkConfig(self.eventID, self.master, expectedResults)
Code Example #3
    def test_bucketEvents(self):
        ops = self.input.param("ops", None)
        user = self.master.rest_username
        source = 'ns_server'
        rest = RestConnection(self.master)

        if ops == 'create':
            expectedResults = {'bucket_name': 'TestBucket', 'ram_quota': 104857600, 'num_replicas': 1,
                               'replica_index': False, 'eviction_policy': 'value_only', 'type': 'membase',
                               'auth_type': 'sasl', 'autocompaction': 'false', 'purge_interval': 'undefined',
                               'flush_enabled': False, 'num_threads': 3, 'source': source,
                               'user': user, 'ip': self.ipAddress, 'port': 57457, 'sessionid': '',
                               'conflict_resolution_type': 'seqno', 'storage_mode': 'couchstore',
                               'max_ttl': 400, 'compression_mode': 'passive'}
            rest.create_bucket(expectedResults['bucket_name'], expectedResults['ram_quota'] // 1048576,
                               expectedResults['auth_type'], 'password', expectedResults['num_replicas'],
                               '11211', 'membase', 0, expectedResults['num_threads'], 0, 'valueOnly',
                               maxTTL=expectedResults['max_ttl'])

        elif ops == 'update':
            expectedResults = {'bucket_name': 'TestBucket', 'ram_quota': 209715200, 'num_replicas': 1,
                               'replica_index': False, 'eviction_policy': 'value_only', 'type': 'membase',
                               'auth_type': 'sasl', 'autocompaction': 'false', 'purge_interval': 'undefined',
                               'flush_enabled': 'true', 'num_threads': 3, 'source': source,
                               'user': user, 'ip': self.ipAddress, 'port': 57457, 'sessionid': '',
                               'storage_mode': 'couchstore', 'max_ttl': 400}
            rest.create_bucket(expectedResults['bucket_name'], expectedResults['ram_quota'] // 1048576,
                               expectedResults['auth_type'], 'password', expectedResults['num_replicas'],
                               '11211', 'membase', 0, expectedResults['num_threads'], 0, 'valueOnly',
                               maxTTL=expectedResults['max_ttl'])
            expectedResults = {'bucket_name': 'TestBucket', 'ram_quota': 104857600, 'num_replicas': 1,
                               'replica_index': True, 'eviction_policy': 'value_only', 'type': 'membase',
                               'auth_type': 'sasl', 'autocompaction': 'false', 'purge_interval': 'undefined',
                               'flush_enabled': True, 'num_threads': 3, 'source': source,
                               'user': user, 'ip': self.ipAddress, 'port': 57457,
                               'storage_mode': 'couchstore', 'max_ttl': 200}
            rest.change_bucket_props(expectedResults['bucket_name'], expectedResults['ram_quota'] // 1048576,
                                     expectedResults['auth_type'], 'password', expectedResults['num_replicas'],
                                     '11211', 1, 1, maxTTL=expectedResults['max_ttl'])

        elif ops == 'delete':
            expectedResults = {'bucket_name': 'TestBucket', 'ram_quota': 104857600, 'num_replicas': 1,
                               'replica_index': True, 'eviction_policy': 'value_only', 'type': 'membase',
                               'auth_type': 'sasl', 'autocompaction': 'false', 'purge_interval': 'undefined',
                               'flush_enabled': False, 'num_threads': 3, 'source': source,
                               'user': user, 'ip': self.ipAddress, 'port': 57457}
            rest.create_bucket(expectedResults['bucket_name'], expectedResults['ram_quota'] // 1048576,
                               expectedResults['auth_type'], 'password', expectedResults['num_replicas'],
                               '11211', 'membase', 1, expectedResults['num_threads'], 0, 'valueOnly')
            rest.delete_bucket(expectedResults['bucket_name'])

        elif ops == 'flush':
            expectedResults = {'bucket_name': 'TestBucket', 'ram_quota': 100, 'num_replicas': 1,
                               'replica_index': True, 'eviction_policy': 'value_only', 'type': 'membase',
                               'auth_type': 'sasl', 'autocompaction': 'false', 'purge_interval': 'undefined',
                               'flush_enabled': True, 'num_threads': 3, 'source': source,
                               'user': user, 'ip': self.ipAddress, 'port': 57457,
                               'storage_mode': 'couchstore'}
            rest.create_bucket(expectedResults['bucket_name'], expectedResults['ram_quota'],
                               expectedResults['auth_type'], 'password', expectedResults['num_replicas'],
                               '11211', 'membase', 1, expectedResults['num_threads'], 1, 'valueOnly')
            self.sleep(10)
            rest.flush_bucket(expectedResults['bucket_name'])

        self.checkConfig(self.eventID, self.master, expectedResults)
Code Example #4
File: rebalancein.py  Project: Boggypop/testrunner
    def rebalance_in_with_bucket_password_change(self):
        if self.sasl_buckets == 0:
            self.fail("no sasl buckets are specified!")
        new_pass = self.input.param("new_pass", "new_pass")
        servs_in = self.servers[self.nodes_init:self.nodes_init + self.nodes_in]
        nodes_in_second = self.input.param("nodes_in_second", 1)
        servs_in_second = self.servers[self.nodes_init + self.nodes_in:
                                       self.nodes_init + self.nodes_in + nodes_in_second]
        servs_init = self.servers[:self.nodes_init]
        servs_result = self.servers[:self.nodes_init + self.nodes_in]

        rebalance = self.cluster.async_rebalance(servs_init, servs_in, [])
        rebalance.result()
        rest = RestConnection(self.master)
        bucket_to_change = [bucket for bucket in self.buckets
                            if bucket.authType == 'sasl' and bucket.name != 'default'][0]
        rest.change_bucket_props(bucket_to_change, saslPassword=new_pass)
        rebalance = self.cluster.async_rebalance(servs_result, servs_in_second, [])
        rebalance.result()
Code Example #5
    def test_bucket_edit_password(self,
                                  bucket_name='secretsbucket',
                                  num_replicas=1,
                                  bucket_size=100):
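        # Set up the per-node secret, change the SASL bucket password, then
        # inspect config.dat and isasl.pw for the updated password.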
     updated_pass = "******"
     rest = RestConnection(self.master)
     for servers in self.servers:
         self.secretmgmt_base_obj.setup_pass_node(servers, self.password)
     bucket_type = self.input.param("bucket_type", 'standard')
     tasks = []
     if bucket_type == 'sasl':
         self.cluster.create_sasl_bucket(self.master, bucket_name,
                                         self.password, num_replicas,
                                         bucket_size)
         self.sleep(10)
         rest.change_bucket_props(bucket_name, saslPassword=updated_pass)
     else:
         self.log.error('Bucket type not specified')
         return
     self.assertTrue(
         BucketOperationHelper.wait_for_bucket_creation(
             bucket_name, RestConnection(self.master)),
         msg='failed to start up bucket with name "{0}'.format(bucket_name))
     gen_load = BlobGenerator('buckettest',
                              'buckettest-',
                              self.value_size,
                              start=0,
                              end=self.num_items)
     self._load_all_buckets(self.master, gen_load, "create", 0)
     install_path = self.secretmgmt_base_obj._get_install_path(self.master)
     temp_result = self.secretmgmt_base_obj.check_config_files(
         self.master, install_path, '/config/config.dat', updated_pass)
     self.assertTrue(temp_result, "Password found in config.dat")
     temp_result = self.secretmgmt_base_obj.check_config_files(
         self.master, install_path, 'isasl.pw', updated_pass)
     self.assertTrue(temp_result, "Password found in isasl.pw")
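Both assertions above expect check_config_files to return True when the rotated password is absent from the file under test; the string arguments are what a failure would print. A local analogue, assuming the helper simply scans the raw file contents for the plaintext password (the real implementation lives in the secret-management base class and runs against the remote node):

def password_absent(path, password):
    # Hypothetical stand-in for check_config_files: True when the plaintext
    # password appears nowhere in the file's raw bytes.
    with open(path, 'rb') as f:
        return password.encode('utf-8') not in f.read()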
Code Example #6
class BucketConfig(BaseTestCase):

    def setUp(self):
        super(BucketConfig, self).setUp()
        self.testcase = '2'
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        #self.time_synchronization = self.input.param("time_sync", "enabledWithoutDrift")
        self.lww = self.input.param("lww", True)
        self.drift = self.input.param("drift", False)
        self.bucket='bucket-1'
        self.master = self.servers[0]
        self.rest = RestConnection(self.master)
        self.cluster = Cluster()
        self.skip_rebalance = self.input.param("skip_rebalance", False)

        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
        mem_quota = int(self.rest.get_nodes_self().mcdMemoryReserved *
                        node_ram_ratio)

        if not self.skip_rebalance:
            self.rest.init_cluster(self.master.rest_username,
                self.master.rest_password)
            self.rest.init_cluster_memoryQuota(self.master.rest_username,
                self.master.rest_password,
                memoryQuota=mem_quota)
            for server in self.servers:
                ClusterOperationHelper.cleanup_cluster([server])
                ClusterOperationHelper.wait_for_ns_servers_or_assert(
                    [self.master], self.testcase)
            try:
                rebalanced = ClusterOperationHelper.add_and_rebalance(
                    self.servers)
            except Exception as e:
                self.fail('cluster is not rebalanced: {0}'.format(e))

        self._create_bucket(self.lww, self.drift)

    def tearDown(self):
        super(BucketConfig, self).tearDown()
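        # The early return below intentionally short-circuits tearDown;
        # the cleanup that follows is unreachable unless it is removed.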
        return
        if not "skip_cleanup" in TestInputSingleton.input.test_params:
            BucketOperationHelper.delete_all_buckets_or_assert(
                self.servers, self.testcase)
            ClusterOperationHelper.cleanup_cluster(self.servers)
            ClusterOperationHelper.wait_for_ns_servers_or_assert(
                self.servers, self.testcase)

    def test_modify_bucket_params(self):
        try:
            self.log.info("Modifying timeSynchronization value after bucket creation ...")
            self._modify_bucket()
        except Exception as e:
            traceback.print_exc()
            self.fail('[ERROR] Modify testcase failed: {0}'.format(e))

    def test_restart(self):
        try:
            self.log.info("Restarting the servers ..")
            self._restart_server(self.servers[:])
            self.log.info("Verifying bucket settings after restart ..")
            self._check_config()
        except Exception as e:
            traceback.print_exc()
            self.fail("[ERROR] Check data after restart failed with exception {0}".format(e))

    def test_failover(self):
        num_nodes = 1
        # servers[1:num_nodes + 1] selects exactly num_nodes nodes after the master
        self.cluster.failover(self.servers, self.servers[1:num_nodes + 1])
        try:
            self.log.info("Failing over 1 of the servers ..")
            self.cluster.rebalance(self.servers, [], self.servers[1:num_nodes + 1])
            self.log.info("Verifying bucket settings after failover ..")
            self._check_config()
        except Exception as e:
            traceback.print_exc()
            self.fail('[ERROR]Failed to failover .. , {0}'.format(e))

    def test_rebalance_in(self):
        try:
            self.log.info("Rebalancing 1 of the servers ..")
            ClusterOperationHelper.add_and_rebalance(
                self.servers)
            self.log.info("Verifying bucket settings after rebalance ..")
            self._check_config()
        except Exception as e:
            self.fail('[ERROR]Rebalance failed .. , {0}'.format(e))

    def test_backup_same_cluster(self):
        self.shell = RemoteMachineShellConnection(self.master)
        self.buckets = RestConnection(self.master).get_buckets()
        self.couchbase_login_info = "%s:%s" % (self.input.membase_settings.rest_username,
                                               self.input.membase_settings.rest_password)
        self.backup_location = "/tmp/backup"
        self.command_options = self.input.param("command_options", '')
        try:
            shell = RemoteMachineShellConnection(self.master)
            self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, self.command_options)

            time.sleep(5)
            shell.restore_backupFile(self.couchbase_login_info, self.backup_location, [bucket.name for bucket in self.buckets])

        finally:
            self._check_config()

    def test_backup_diff_bucket(self):
        self.shell = RemoteMachineShellConnection(self.master)
        self.buckets = RestConnection(self.master).get_buckets()
        self.couchbase_login_info = "%s:%s" % (self.input.membase_settings.rest_username,
                                               self.input.membase_settings.rest_password)
        self.backup_location = "/tmp/backup"
        self.command_options = self.input.param("command_options", '')
        try:
            shell = RemoteMachineShellConnection(self.master)
            self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, self.command_options)

            time.sleep(5)
            self._create_bucket(lww=False, name="new_bucket")
            self.buckets = RestConnection(self.master).get_buckets()
            shell.restore_backupFile(self.couchbase_login_info, self.backup_location, ["new_bucket"])

        finally:
            self._check_config()

    # Helper functions for the above test cases

    # Create a bucket if it doesn't exist. The drift parameter is currently unused.
    def _create_bucket(self, lww=True, drift=False, name=None):

        if lww:
            self.lww = lww

        if name:
            self.bucket = name

        helper = RestHelper(self.rest)
        if not helper.bucket_exists(self.bucket):
            node_ram_ratio = BucketOperationHelper.base_bucket_ratio(
                self.servers)
            info = self.rest.get_nodes_self()
            self.rest.create_bucket(bucket=self.bucket,
                ramQuotaMB=512, authType='sasl', lww=self.lww)
            try:
                ready = BucketOperationHelper.wait_for_memcached(self.master,
                    self.bucket)
            except Exception as e:
                self.fail('unable to create bucket: {0}'.format(e))

    # KETAKI to change this
    def _modify_bucket(self):
        helper = RestHelper(self.rest)
        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(
            self.servers)
        info = self.rest.get_nodes_self()

        status, content = self.rest.change_bucket_props(bucket=self.bucket,
            ramQuotaMB=512, authType='sasl', timeSynchronization='enabledWithOutDrift')
        if re.search('TimeSyncronization not allowed in update bucket', content):
            self.log.info('[PASS] Modify bucket disallowed Time Synchronization, as expected.')
        else:
            self.fail('[ERROR] Modify bucket unexpectedly allowed Time Synchronization')

    def _restart_server(self, servers):
        for server in servers:
            shell = RemoteMachineShellConnection(server)
            shell.stop_couchbase()
            time.sleep(10)
            shell.start_couchbase()
            shell.disconnect()
        ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, self, wait_if_warmup=True)

    # REBOOT
    def _reboot_server(self):
        try:
            for server in self.servers[:]:
                shell = RemoteMachineShellConnection(server)
                if shell.extract_remote_info().type.lower() == 'windows':
                    o, r = shell.execute_command("shutdown -r -f -t 0")
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} is being stopped".format(server.ip))
                elif shell.extract_remote_info().type.lower() == 'linux':
                    o, r = shell.execute_command("reboot")
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} is being stopped".format(server.ip))

                    ClusterOperationHelper.wait_for_ns_servers_or_assert([server], self, wait_if_warmup=True)
                    shell = RemoteMachineShellConnection(server)
                    command = "/sbin/iptables -F"
                    o, r = shell.execute_command(command)
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} backup".format(server.ip))
        finally:
            self.log.info("Warming-up servers ..")
            ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self, wait_if_warmup=True)



    def _check_config(self):
        rc = self.rest.get_bucket_json(self.bucket)
        if 'conflictResolutionType' in rc:
            conflictResolution = rc['conflictResolutionType']
            self.assertTrue(conflictResolution == 'lww',
                            'Expected conflict resolution of lww but got {0}'.format(conflictResolution))


        """ drift is disabled in 4.6, commenting out for now as it may come back later
Code Example #7
class AutoRetryFailedRebalance(RebalanceBaseTest):
    def setUp(self):
        super(AutoRetryFailedRebalance, self).setUp()
        self.rest = RestConnection(self.servers[0])
        self.sleep_time = self.input.param("sleep_time", 15)
        self.enabled = self.input.param("enabled", True)
        self.afterTimePeriod = self.input.param("afterTimePeriod", 300)
        self.maxAttempts = self.input.param("maxAttempts", 1)
        self.log.info("Changing the retry rebalance settings ....")
        self.change_retry_rebalance_settings(
            enabled=self.enabled,
            afterTimePeriod=self.afterTimePeriod,
            maxAttempts=self.maxAttempts)
        self.rebalance_operation = self.input.param("rebalance_operation",
                                                    "rebalance_out")
        self.disable_auto_failover = self.input.param("disable_auto_failover",
                                                      True)
        self.auto_failover_timeout = self.input.param("auto_failover_timeout",
                                                      120)
        if self.disable_auto_failover:
            self.rest.update_autofailover_settings(False, 120)
        else:
            self.rest.update_autofailover_settings(True,
                                                   self.auto_failover_timeout)

    def tearDown(self):
        self.reset_retry_rebalance_settings()
        # Reset to default value
        super(AutoRetryFailedRebalance, self).tearDown()
        rest = RestConnection(self.servers[0])
        zones = rest.get_zone_names()
        for zone in zones:
            if zone != "Group 1":
                rest.delete_zone(zone)

    def test_auto_retry_of_failed_rebalance_where_failure_happens_before_rebalance(
            self):
        before_rebalance_failure = self.input.param("before_rebalance_failure",
                                                    "stop_server")
        # induce the failure before the rebalance starts
        self._induce_error(before_rebalance_failure)
        self.sleep(self.sleep_time)
        try:
            operation = self._rebalance_operation(self.rebalance_operation)
            operation.result()
        except Exception as e:
            self.log.info("Rebalance failed with : {0}".format(str(e)))
            # Recover from the error
            self._recover_from_error(before_rebalance_failure)
            self.check_retry_rebalance_succeeded()
        else:
            self.fail(
                "Rebalance did not fail as expected. Hence could not validate auto-retry feature.."
            )
        finally:
            if self.disable_auto_failover:
                self.rest.update_autofailover_settings(True, 120)
            self.start_server(self.servers[1])
            self.stop_firewall_on_node(self.servers[1])

    def test_auto_retry_of_failed_rebalance_where_failure_happens_during_rebalance(
            self):
        during_rebalance_failure = self.input.param("during_rebalance_failure",
                                                    "stop_server")
        try:
            operation = self._rebalance_operation(self.rebalance_operation)
            self.sleep(self.sleep_time)
            # induce the failure during the rebalance
            self._induce_error(during_rebalance_failure)
            operation.result()
        except Exception as e:
            self.log.info("Rebalance failed with : {0}".format(str(e)))
            # Recover from the error
            self._recover_from_error(during_rebalance_failure)
            self.check_retry_rebalance_succeeded()
        else:
            # This is added as the failover task is not throwing exception
            if self.rebalance_operation == "graceful_failover":
                # Recover from the error
                self._recover_from_error(during_rebalance_failure)
                self.check_retry_rebalance_succeeded()
            else:
                self.fail(
                    "Rebalance did not fail as expected. Hence could not validate auto-retry feature.."
                )
        finally:
            if self.disable_auto_failover:
                self.rest.update_autofailover_settings(True, 120)
            self.start_server(self.servers[1])
            self.stop_firewall_on_node(self.servers[1])

    def test_auto_retry_of_failed_rebalance_does_not_get_triggered_when_rebalance_is_stopped(
            self):
        operation = self._rebalance_operation(self.rebalance_operation)
        reached = RestHelper(self.rest).rebalance_reached(30)
        self.assertTrue(reached,
                        "Rebalance failed or did not reach {0}%".format(30))
        self.rest.stop_rebalance(wait_timeout=self.sleep_time)
        result = json.loads(self.rest.get_pending_rebalance_info())
        self.log.info(result)
        retry_rebalance = result["retry_rebalance"]
        if retry_rebalance != "not_pending":
            self.fail(
                "Auto-retry succeeded even when Rebalance was stopped by user")

    def test_negative_auto_retry_of_failed_rebalance_where_rebalance_will_be_cancelled(
            self):
        during_rebalance_failure = self.input.param("during_rebalance_failure",
                                                    "stop_server")
        post_failure_operation = self.input.param("post_failure_operation",
                                                  "cancel_pending_rebalance")
        try:
            operation = self._rebalance_operation(self.rebalance_operation)
            self.sleep(self.sleep_time)
            # induce the failure during the rebalance
            self._induce_error(during_rebalance_failure)
            operation.result()
        except Exception as e:
            self.log.info("Rebalance failed with : {0}".format(str(e)))
            # Recover from the error
            self._recover_from_error(during_rebalance_failure)
            result = json.loads(self.rest.get_pending_rebalance_info())
            self.log.info(result)
            retry_rebalance = result["retry_rebalance"]
            rebalance_id = result["rebalance_id"]
            if retry_rebalance != "pending":
                self.fail("Auto-retry of failed rebalance is not triggered")
            if post_failure_operation == "cancel_pending_rebalance":
                # cancel pending rebalance
                self.log.info(
                    "Cancelling rebalance Id: {0}".format(rebalance_id))
                self.rest.cancel_pending_rebalance(rebalance_id)
            elif post_failure_operation == "disable_auto_retry":
                # disable the auto retry of the failed rebalance
                self.log.info(
                    "Disable the the auto retry of the failed rebalance")
                self.change_retry_rebalance_settings(enabled=False)
            elif post_failure_operation == "retry_failed_rebalance_manually":
                # retry failed rebalance manually
                self.log.info(
                    "Retrying failed rebalance Id: {0}".format(rebalance_id))
                self.cluster.rebalance(self.servers[:self.nodes_init], [], [])
            else:
                self.fail("Invalid post_failure_operation option")
            # Now check and ensure retry won't happen
            result = json.loads(self.rest.get_pending_rebalance_info())
            self.log.info(result)
            retry_rebalance = result["retry_rebalance"]
            if retry_rebalance != "not_pending":
                self.fail("Auto-retry of failed rebalance is not cancelled")
        else:
            self.fail(
                "Rebalance did not fail as expected. Hence could not validate auto-retry feature.."
            )
        finally:
            if self.disable_auto_failover:
                self.rest.update_autofailover_settings(True, 120)
            self.start_server(self.servers[1])
            self.stop_firewall_on_node(self.servers[1])

    def test_negative_auto_retry_of_failed_rebalance_where_rebalance_will_not_be_cancelled(
            self):
        during_rebalance_failure = self.input.param("during_rebalance_failure",
                                                    "stop_server")
        post_failure_operation = self.input.param("post_failure_operation",
                                                  "create_delete_buckets")
        zone_name = "Group_{0}_{1}".format(random.randint(1, 1000000000),
                                           self._testMethodName)
        zone_name = zone_name[0:60]
        default_zone = "Group 1"
        moved_node = []
        moved_node.append(self.servers[1].ip)
        try:
            operation = self._rebalance_operation(self.rebalance_operation)
            self.sleep(self.sleep_time)
            # induce the failure during the rebalance
            self._induce_error(during_rebalance_failure)
            operation.result()
        except Exception as e:
            self.log.info("Rebalance failed with : {0}".format(str(e)))
            # Recover from the error
            self._recover_from_error(during_rebalance_failure)
            result = json.loads(self.rest.get_pending_rebalance_info())
            self.log.info(result)
            retry_rebalance = result["retry_rebalance"]
            if retry_rebalance != "pending":
                self.fail("Auto-retry of failed rebalance is not triggered")
            if post_failure_operation == "create_delete_buckets":
                # delete buckets and create new one
                BucketOperationHelper.delete_all_buckets_or_assert(
                    servers=self.servers, test_case=self)
                self.sleep(self.sleep_time)
                BucketOperationHelper.create_bucket(self.master,
                                                    test_case=self)
            elif post_failure_operation == "change_replica_count":
                # change replica count
                self.log.info("Changing replica count of buckets")
                for bucket in self.buckets:
                    self.rest.change_bucket_props(bucket, replicaNumber=2)
            elif post_failure_operation == "change_server_group":
                # change server group
                self.log.info("Creating new zone " + zone_name)
                self.rest.add_zone(zone_name)
                self.log.info("Moving {0} to new zone {1}".format(
                    moved_node, zone_name))
                status = self.rest.shuffle_nodes_in_zones(
                    moved_node, default_zone, zone_name)
            else:
                self.fail("Invalid post_failure_operation option")
            # In these failure scenarios, while the retry is pending, the retry will be attempted but will fail
            try:
                self.check_retry_rebalance_succeeded()
            except Exception as e:
                self.log.info(e)
                if "Retrying of rebalance still did not help. All the retries exhausted" not in str(
                        e):
                    self.fail(
                        "Auto retry of failed rebalance succeeded when it was expected to fail"
                    )
        else:
            self.fail(
                "Rebalance did not fail as expected. Hence could not validate auto-retry feature.."
            )
        finally:
            if post_failure_operation == "change_server_group":
                status = self.rest.shuffle_nodes_in_zones(
                    moved_node, zone_name, default_zone)
                self.log.info(
                    "Shuffled the node back to the default group. Status: {0}".format(status))
                self.sleep(self.sleep_time)
                self.log.info("Deleting new zone " + zone_name)
                try:
                    self.rest.delete_zone(zone_name)
                except:
                    self.log.info("Errors in deleting zone")
            if self.disable_auto_failover:
                self.rest.update_autofailover_settings(True, 120)
            self.start_server(self.servers[1])
            self.stop_firewall_on_node(self.servers[1])

    def test_auto_retry_of_failed_rebalance_with_rebalance_test_conditions(
            self):
        test_failure_condition = self.input.param("test_failure_condition")
        # induce the failure before the rebalance starts
        self._induce_rebalance_test_condition(test_failure_condition)
        self.sleep(self.sleep_time)
        try:
            operation = self._rebalance_operation(self.rebalance_operation)
            operation.result()
        except Exception as e:
            self.log.info("Rebalance failed with : {0}".format(str(e)))
            # Delete the rebalance test condition so that we recover from the error
            self._delete_rebalance_test_condition(test_failure_condition)
            self.check_retry_rebalance_succeeded()
        else:
            self.fail(
                "Rebalance did not fail as expected. Hence could not validate auto-retry feature.."
            )
        finally:
            if self.disable_auto_failover:
                self.rest.update_autofailover_settings(True, 120)
            self._delete_rebalance_test_condition(test_failure_condition)

    def test_auto_retry_of_failed_rebalance_with_autofailvoer_enabled(self):
        before_rebalance_failure = self.input.param("before_rebalance_failure",
                                                    "stop_server")
        # induce the failure before the rebalance starts
        self._induce_error(before_rebalance_failure)
        try:
            operation = self._rebalance_operation(self.rebalance_operation)
            operation.result()
        except Exception as e:
            self.log.info("Rebalance failed with : {0}".format(str(e)))
            if self.auto_failover_timeout < self.afterTimePeriod:
                self.sleep(self.auto_failover_timeout)
                result = json.loads(self.rest.get_pending_rebalance_info())
                self.log.info(result)
                retry_rebalance = result["retry_rebalance"]
                if retry_rebalance != "not_pending":
                    self.fail(
                        "Auto-failover did not cancel pending retry of the failed rebalance"
                    )
            else:
                try:
                    self.check_retry_rebalance_succeeded()
                except Exception as e:
                    if "Retrying of rebalance still did not help" not in str(
                            e):
                        self.fail(
                            "retry rebalance succeeded even without failover")
                    self.sleep(self.auto_failover_timeout)
                    self.cluster.rebalance(self.servers[:self.nodes_init], [],
                                           [])
        else:
            self.fail(
                "Rebalance did not fail as expected. Hence could not validate auto-retry feature.."
            )
        finally:
            if self.disable_auto_failover:
                self.rest.update_autofailover_settings(True, 120)
            self.start_server(self.servers[1])
            self.stop_firewall_on_node(self.servers[1])

    def _rebalance_operation(self, rebalance_operation):
        self.log.info("Starting rebalance operation of type : {0}".format(
            rebalance_operation))
        if rebalance_operation == "rebalance_out":
            operation = self.cluster.async_rebalance(
                self.servers[:self.nodes_init], [], self.servers[1:])
        elif rebalance_operation == "rebalance_in":
            operation = self.cluster.async_rebalance(
                self.servers[:self.nodes_init],
                [self.servers[self.nodes_init]], [])
        elif rebalance_operation == "swap_rebalance":
            self.rest.add_node(self.master.rest_username,
                               self.master.rest_password,
                               self.servers[self.nodes_init].ip,
                               self.servers[self.nodes_init].port)
            operation = self.cluster.async_rebalance(
                self.servers[:self.nodes_init], [],
                [self.servers[self.nodes_init - 1]])
        elif rebalance_operation == "graceful_failover":
            # TODO : retry for graceful failover is not yet implemented
            operation = self.cluster.async_failover(
                [self.master],
                failover_nodes=[self.servers[1]],
                graceful=True,
                wait_for_pending=120)
        return operation

    def _induce_error(self, error_condition):
        if error_condition == "stop_server":
            self.stop_server(self.servers[1])
        elif error_condition == "enable_firewall":
            self.start_firewall_on_node(self.servers[1])
        elif error_condition == "kill_memcached":
            self.kill_server_memcached(self.servers[1])
        elif error_condition == "reboot_server":
            shell = RemoteMachineShellConnection(self.servers[1])
            shell.reboot_node()
        elif error_condition == "kill_erlang":
            shell = RemoteMachineShellConnection(self.servers[1])
            shell.kill_erlang()
            self.sleep(self.sleep_time * 3)
        else:
            self.fail("Invalid error induce option")

    def _recover_from_error(self, error_condition):
        if error_condition == "stop_server" or error_condition == "kill_erlang":
            self.start_server(self.servers[1])
            self.sleep(self.sleep_time * 4)
        elif error_condition == "enable_firewall":
            self.stop_firewall_on_node(self.servers[1])
        elif error_condition == "reboot_server":
            self.sleep(self.sleep_time * 4)
            # wait till node is ready after warmup
            ClusterOperationHelper.wait_for_ns_servers_or_assert(
                [self.servers[1]], self, wait_if_warmup=True)

    def _induce_rebalance_test_condition(self, test_failure_condition):
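        # These testconditions are ns_server-side failure-injection hooks,
        # set on every node through the REST diag_eval endpoint.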
        if test_failure_condition == "verify_replication":
            set_command = "testconditions:set(verify_replication, {fail, \"" + "default" + "\"})"
        elif test_failure_condition == "backfill_done":
            set_command = "testconditions:set(backfill_done, {for_vb_move, \"" + "default\", 1 , " + "fail})"
        else:
            set_command = "testconditions:set({0}, fail)".format(
                test_failure_condition)
        get_command = "testconditions:get({0})".format(test_failure_condition)
        for server in self.servers:
            rest = RestConnection(server)
            _, content = rest.diag_eval(set_command)
            self.log.info("Command : {0} Return : {1}".format(
                set_command, content))

        for server in self.servers:
            rest = RestConnection(server)
            _, content = rest.diag_eval(get_command)
            self.log.info("Command : {0} Return : {1}".format(
                get_command, content))

    def _delete_rebalance_test_condition(self, test_failure_condition):
        delete_command = "testconditions:delete({0})".format(
            test_failure_condition)
        get_command = "testconditions:get({0})".format(test_failure_condition)
        for server in self.servers:
            rest = RestConnection(server)
            _, content = rest.diag_eval(delete_command)
            self.log.info("Command : {0} Return : {1}".format(
                delete_command, content))

        for server in self.servers:
            rest = RestConnection(server)
            _, content = rest.diag_eval(get_command)
            self.log.info("Command : {0} Return : {1}".format(
                get_command, content))
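For context, the retry settings that setUp adjusts map onto ns_server's retry-rebalance REST API. A minimal standalone sketch, assuming the /settings/retryRebalance endpoint on port 8091 and the requests library (the harness's change_retry_rebalance_settings wraps the same call):

import requests

def set_retry_rebalance(host, user, password, enabled=True,
                        after_time_period=300, max_attempts=1):
    # POST the same knobs the test's setUp tweaks: whether to auto-retry a
    # failed rebalance, how long to wait first, and how many attempts to make.
    resp = requests.post(
        "http://{0}:8091/settings/retryRebalance".format(host),
        auth=(user, password),
        data={"enabled": str(enabled).lower(),
              "afterTimePeriod": after_time_period,
              "maxAttempts": max_attempts})
    resp.raise_for_status()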
Code Example #8
class SecondaryIndexingClusterOpsTests(BaseSecondaryIndexingTests):
    def setUp(self):
        super(SecondaryIndexingClusterOpsTests, self).setUp()
        server = self.get_nodes_from_services_map(service_type="n1ql")
        self.rest = RestConnection(server)

    def tearDown(self):
        super(SecondaryIndexingClusterOpsTests, self).tearDown()

    def test_remove_bucket_and_query(self):
        #Initialization operation
        self.run_multi_operations(
            buckets=self.buckets,
            query_definitions=self.query_definitions,
            create_index=True,
            drop_index=False,
            query_with_explain=self.run_query_with_explain,
            query=self.run_query)
        #Remove the buckets
        for bucket in self.buckets:
            self.rest.delete_bucket(bucket.name)
        #Verify the indexes are gone
        self.verify_index_absence(query_definitions=self.query_definitions,
                                  buckets=self.buckets)

    def test_change_bucket_properties(self):
        #Initialization operation
        self.run_multi_operations(buckets=self.buckets,
                                  query_definitions=self.query_definitions,
                                  create_index=True,
                                  drop_index=False,
                                  query_with_explain=True,
                                  query=True)

        #Change Bucket Properties
        for bucket in self.buckets:
            self.rest.change_bucket_props(bucket,
                                          ramQuotaMB=None,
                                          authType=None,
                                          saslPassword=None,
                                          replicaNumber=0,
                                          proxyPort=None,
                                          replicaIndex=None,
                                          flushEnabled=False)

        #Run query and query explain
        self.run_multi_operations(buckets=self.buckets,
                                  query_definitions=self.query_definitions,
                                  create_index=False,
                                  drop_index=True,
                                  query_with_explain=True,
                                  query=True)

    def test_flush_bucket_and_query(self):
        #Initialization operation
        self.run_multi_operations(buckets=self.buckets,
                                  query_definitions=self.query_definitions,
                                  create_index=True,
                                  drop_index=False,
                                  query_with_explain=True,
                                  query=True)
        #Flush the buckets
        for bucket in self.buckets:
            self.rest.flush_bucket(bucket.name)
        rollback_exception = True
        query_try_count = 0
        while rollback_exception and query_try_count < 10:
            self.sleep(5)
            query_try_count += 1
            #Query the bucket and expect an empty result set
            try:
                self.multi_query_using_index_with_emptyresult(
                    query_definitions=self.query_definitions,
                    buckets=self.buckets)
                rollback_exception = False
            except Exception as ex:
                msg = "Indexer rollback"
                if msg not in str(ex):
                    rollback_exception = False
                    self.log.info(ex)
                    raise
        self.assertFalse(rollback_exception,
                         "Indexer still in rollback after 50 secs.")
Code Example #9
class SecondaryIndexingClusterOpsTests(BaseSecondaryIndexingTests):
    def setUp(self):
        super(SecondaryIndexingClusterOpsTests, self).setUp()
        server = self.get_nodes_from_services_map(service_type="n1ql")
        self.rest = RestConnection(server)

    def tearDown(self):
        super(SecondaryIndexingClusterOpsTests, self).tearDown()

    def test_remove_bucket_and_query(self):
        #Initialization operation
        self.run_multi_operations(
            buckets=self.buckets,
            query_definitions=self.query_definitions,
            create_index=True,
            drop_index=False,
            query_with_explain=self.run_query_with_explain,
            query=self.run_query)
        #Remove the buckets
        for bucket in self.buckets:
            self.rest.delete_bucket(bucket.name)
        #Verify the indexes are gone
        self.verify_index_absence(query_definitions=self.query_definitions,
                                  buckets=self.buckets)

    def test_change_bucket_properties(self):
        #Initialization operation
        self.run_multi_operations(buckets=self.buckets,
                                  query_definitions=self.query_definitions,
                                  create_index=True,
                                  drop_index=False,
                                  query_with_explain=True,
                                  query=True)

        #Change Bucket Properties
        for bucket in self.buckets:
            self.rest.change_bucket_props(bucket,
                                          ramQuotaMB=None,
                                          authType=None,
                                          saslPassword=None,
                                          replicaNumber=0,
                                          proxyPort=None,
                                          replicaIndex=None,
                                          flushEnabled=False)

        #Run query and query explain
        self.run_multi_operations(buckets=self.buckets,
                                  query_definitions=self.query_definitions,
                                  create_index=False,
                                  drop_index=True,
                                  query_with_explain=True,
                                  query=True)

    def test_flush_bucket_and_query(self):
        #Initialization operation
        self.run_multi_operations(buckets=self.buckets,
                                  query_definitions=self.query_definitions,
                                  create_index=True,
                                  drop_index=False,
                                  query_with_explain=True,
                                  query=True)
        #Flush the buckets
        for bucket in self.buckets:
            self.rest.flush_bucket(bucket.name)
        rollback_exception = True
        query_try_count = 0
        while rollback_exception and query_try_count < 10:
            self.sleep(5)
            query_try_count += 1
            #Query the bucket and expect an empty result set
            try:
                self.multi_query_using_index_with_emptyresult(
                    query_definitions=self.query_definitions,
                    buckets=self.buckets)
                rollback_exception = False
            except Exception as ex:
                msg = "Indexer rollback"
                if msg not in str(ex):
                    rollback_exception = False
                    self.log.info(ex)
                    raise
        self.assertFalse(rollback_exception,
                         "Indexer still in rollback after 50 secs.")

    def test_delete_create_bucket_and_query(self):
        #Initialization operation
        self.run_multi_operations(
            buckets=self.buckets,
            query_definitions=self.query_definitions,
            create_index=True,
            drop_index=False,
            query_with_explain=self.run_query_with_explain,
            query=self.run_query)
        #Remove bucket and recreate it
        for bucket in self.buckets:
            self.rest.delete_bucket(bucket.name)
        self.sleep(2)
        #Recreate the buckets
        self._bucket_creation()
        self.sleep(2)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(
            self.servers, self)
        #Verify the indexes are gone
        self.verify_index_absence(query_definitions=self.query_definitions,
                                  buckets=self.buckets)
        index_map = self.get_index_stats()
        self.assertTrue(
            len(index_map) == 0,
            "Index Stats still show {0}".format(index_map))

    def test_data_loss(self):
        #Initialization operation
        self.run_multi_operations(buckets=self.buckets,
                                  query_definitions=self.query_definitions,
                                  create_index=True,
                                  drop_index=False,
                                  query_with_explain=False,
                                  query=False)
        self._verify_bucket_count_with_index_count()
        try:
            servr_out = self.servers[1:self.nodes_init]
            failover_task = self.cluster.async_failover(
                [self.master], failover_nodes=servr_out, graceful=False)
            failover_task.result()
            rebalance = self.cluster.async_rebalance(self.servers[:1], [],
                                                     servr_out)
            rebalance.result()
            # get the items in the index and check if the data loss is reflected correctly
            self.sleep(2)
        except Exception as ex:
            raise
        finally:
            self.run_multi_operations(buckets=self.buckets,
                                      query_definitions=self.query_definitions,
                                      create_index=False,
                                      drop_index=True,
                                      query_with_explain=False,
                                      query=False)

    def test_tombstone_removal_impact(self):
        #Initialization operation
        self.run_multi_operations(buckets=self.buckets,
                                  query_definitions=self.query_definitions,
                                  create_index=True,
                                  drop_index=False,
                                  query_with_explain=False,
                                  query=False)
        self.sleep(20)
        self._verify_bucket_count_with_index_count()
        try:
            # Run operations expiry and deletion
            self.run_doc_ops()
            tasks = []
            # Run auto-compaction to remove the tombstones
            for bucket in self.buckets:
                tasks.append(
                    self.cluster.async_compact_bucket(self.master, bucket))
            for task in tasks:
                task.result()
            self.sleep(10)
            # recreate the indexes and run the queries to analyze the results
            self.run_multi_operations(buckets=self.buckets,
                                      query_definitions=self.query_definitions,
                                      create_index=True,
                                      drop_index=False,
                                      query_with_explain=True,
                                      query=True)
        except Exception as ex:
            self.log.info(str(ex))
            raise
        finally:
            self.run_multi_operations(buckets=self.buckets,
                                      query_definitions=self.query_definitions,
                                      create_index=False,
                                      drop_index=True,
                                      query_with_explain=False,
                                      query=False)

    def test_maxttl_setting(self):
        """
        Load data, create index, check if all docs are indexed
        Wait until maxttl has elapsed, check if all docs are deleted
        and the deletes are indexed
        :return:
        """
        maxttl = int(self.input.param("maxttl", None))
        self.run_multi_operations(buckets=self.buckets,
                                  query_definitions=self.query_definitions,
                                  create_index=True,
                                  drop_index=False,
                                  query_with_explain=False,
                                  query=False)
        self.sleep(20)
        self._verify_bucket_count_with_index_count()
        self.sleep(
            maxttl,
            "waiting for docs to be expired automatically per maxttl rule")
        self._expiry_pager(self.master)
        self.sleep(60, "wait for expiry pager to run on all nodes...")
        for bucket in self.buckets:
            items = RestConnection(self.master).get_active_key_count(bucket)
            self.log.info(
                "Docs in source bucket is {0} after maxttl has elapsed".format(
                    items))
            if items != 0:
                self.fail(
                    "Docs in source bucket is not 0 after maxttl has elapsed")
        self._verify_bucket_count_with_index_count()
Code Example #10
File: cluster_ops_2i.py  Project: membase/testrunner
class SecondaryIndexingClusterOpsTests(BaseSecondaryIndexingTests):

    def setUp(self):
        super(SecondaryIndexingClusterOpsTests, self).setUp()
        server = self.get_nodes_from_services_map(service_type = "n1ql")
        self.rest = RestConnection(server)

    def tearDown(self):
        super(SecondaryIndexingClusterOpsTests, self).tearDown()

    def test_remove_bucket_and_query(self):
        #Initialization operation
        self.run_multi_operations(buckets = self.buckets,
            query_definitions = self.query_definitions,
            create_index = True, drop_index = False,
            query_with_explain = self.run_query_with_explain, query = self.run_query)
        #Remove the buckets
        for bucket in self.buckets:
            self.rest.delete_bucket(bucket.name)
        #Verify the indexes are gone
        self.verify_index_absence(query_definitions = self.query_definitions, buckets = self.buckets)

    def test_change_bucket_properties(self):
        #Initialization operation
        self.run_multi_operations(buckets = self.buckets,
            query_definitions = self.query_definitions,
            create_index = True, drop_index = False,
            query_with_explain = True, query = True)

        #Change Bucket Properties
        for bucket in self.buckets:
            self.rest.change_bucket_props(bucket,
                      ramQuotaMB=None,
                      authType=None,
                      saslPassword=None,
                      replicaNumber=0,
                      proxyPort=None,
                      replicaIndex=None,
                      flushEnabled=False)

        #Run query and query explain
        self.run_multi_operations(buckets = self.buckets,
            query_definitions = self.query_definitions,
            create_index = False, drop_index = True,
            query_with_explain = True, query = True)

    def test_flush_bucket_and_query(self):
        #Initialization operation
        self.run_multi_operations(buckets=self.buckets,
            query_definitions=self.query_definitions,
            create_index=True, drop_index=False,
            query_with_explain=True, query=True)
        # Flush the buckets
        for bucket in self.buckets:
            self.rest.flush_bucket(bucket.name)
        rollback_exception = True
        query_try_count = 0
        while rollback_exception and query_try_count < 10:
            self.sleep(5)
            query_try_count += 1
            # Query the buckets, expecting empty result sets
            try:
                self.multi_query_using_index_with_emptyresult(
                    query_definitions=self.query_definitions, buckets=self.buckets)
                rollback_exception = False
            except Exception as ex:
                msg = "Indexer rollback"
                if msg not in str(ex):
                    rollback_exception = False
                    self.log.info(ex)
                    raise
        self.assertFalse(rollback_exception, "Indexer still in rollback after 50 secs.")
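
The loop above retries for up to 50 seconds because queries issued right after a flush can transiently fail with an "Indexer rollback" error while the indexer catches up. The same pattern can be factored into a generic helper; this is a sketch, and the name retry_on and its defaults are illustrative:

import time

def retry_on(fn, should_retry, attempts=10, delay=5):
    # Call fn until it succeeds; re-raise immediately for errors that
    # should_retry rejects, and give up after the final attempt.
    for attempt in range(attempts):
        try:
            return fn()
        except Exception as ex:
            if attempt == attempts - 1 or not should_retry(ex):
                raise
            time.sleep(delay)

Usage would look like retry_on(run_queries, lambda ex: "Indexer rollback" in str(ex)).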
Code example #11
File: audittest.py Project: couchbase/testrunner
    def test_bucketEvents(self):
        ops = self.input.param("ops", None)
        user = self.master.rest_username
        source = 'ns_server'
        rest = RestConnection(self.master)

        if ops == 'create':
            expectedResults = {
                'bucket_name': 'TestBucket',
                'ram_quota': 268435456,
                'num_replicas': 1,
                'replica_index': False,
                'eviction_policy': 'value_only',
                'type': 'membase',
                "autocompaction": 'false',
                "purge_interval": "undefined",
                "flush_enabled": False,
                "num_threads": 3,
                "source": source,
                "user": user,
                "local:ip": self.master.ip,
                "local:port": 8091,
                'sessionid': '',
                'conflict_resolution_type': 'seqno',
                'storage_mode': self.bucket_storage,
                'max_ttl': 400,
                'compression_mode': 'passive',
                'remote:ip': self.ipAddress
            }
            rest.create_bucket(bucket=expectedResults['bucket_name'],
                               ramQuotaMB=expectedResults['ram_quota'] //
                               1048576,
                               replicaNumber=expectedResults['num_replicas'],
                               proxyPort='11211',
                               bucketType='membase',
                               replica_index=0,
                               threadsNumber=expectedResults['num_threads'],
                               flushEnabled=0,
                               evictionPolicy='valueOnly',
                               maxTTL=expectedResults['max_ttl'],
                               storageBackend=self.bucket_storage)

        elif ops == 'update':
            expectedResults = {
                'bucket_name': 'TestBucket',
                'ram_quota': 268435456,
                'num_replicas': 1,
                'replica_index': False,
                'eviction_policy': 'value_only',
                'type': 'membase',
                "autocompaction": 'false',
                "purge_interval": "undefined",
                "flush_enabled": 'true',
                "num_threads": 3,
                "source": source,
                "user": user,
                "ip": self.ipAddress,
                "port": 57457,
                'sessionid': '',
                'storage_mode': self.bucket_storage,
                'max_ttl': 400
            }
            rest.create_bucket(bucket=expectedResults['bucket_name'],
                               ramQuotaMB=expectedResults['ram_quota'] //
                               1048576,
                               replicaNumber=expectedResults['num_replicas'],
                               proxyPort='11211',
                               bucketType='membase',
                               replica_index=0,
                               threadsNumber=expectedResults['num_threads'],
                               flushEnabled=0,
                               evictionPolicy='valueOnly',
                               maxTTL=expectedResults['max_ttl'],
                               storageBackend=self.bucket_storage)
            expectedResults = {
                'bucket_name': 'TestBucket',
                'ram_quota': 268435456,
                'num_replicas': 1,
                'replica_index': True,
                'eviction_policy': 'value_only',
                'type': 'membase',
                "autocompaction": 'false',
                "purge_interval": "undefined",
                "flush_enabled": True,
                "num_threads": 3,
                "source": source,
                "user": user,
                "ip": self.ipAddress,
                "port": 57457,
                'storage_mode': self.bucket_storage,
                'max_ttl': 200
            }
            rest.change_bucket_props(
                bucket=expectedResults['bucket_name'],
                ramQuotaMB=expectedResults['ram_quota'] // 1048576,
                replicaNumber=expectedResults['num_replicas'],
                proxyPort='11211',
                replicaIndex=1,
                maxTTL=expectedResults['max_ttl'])

        elif ops == 'delete':
            expectedResults = {
                'bucket_name': 'TestBucket',
                'ram_quota': 268435456,
                'num_replicas': 1,
                'replica_index': True,
                'eviction_policy': 'value_only',
                'type': 'membase',
                "autocompaction": 'false',
                "purge_interval": "undefined",
                "flush_enabled": False,
                "num_threads": 3,
                "source": source,
                "user": user,
                "ip": self.ipAddress,
                "port": 57457
            }
            rest.create_bucket(bucket=expectedResults['bucket_name'],
                               ramQuotaMB=expectedResults['ram_quota'] //
                               1048576,
                               replicaNumber=expectedResults['num_replicas'],
                               proxyPort='11211',
                               bucketType='membase',
                               replica_index=1,
                               threadsNumber=expectedResults['num_threads'],
                               flushEnabled=0,
                               evictionPolicy='valueOnly',
                               storageBackend=self.bucket_storage)
            rest.delete_bucket(expectedResults['bucket_name'])

        elif ops == 'flush':
            expectedResults = {
                'bucket_name': 'TestBucket',
                'ram_quota': 256,
                'num_replicas': 1,
                'replica_index': True,
                'eviction_policy': 'value_only',
                'type': 'membase',
                "autocompaction": 'false',
                "purge_interval": "undefined",
                "flush_enabled": True,
                "num_threads": 3,
                "source": source,
                "user": user,
                "ip": self.ipAddress,
                "port": 57457,
                'storage_mode': self.bucket_storage
            }
            rest.create_bucket(bucket=expectedResults['bucket_name'],
                               ramQuotaMB=expectedResults['ram_quota'],
                               replicaNumber=expectedResults['num_replicas'],
                               proxyPort='11211',
                               bucketType='membase',
                               replica_index=1,
                               threadsNumber=expectedResults['num_threads'],
                               flushEnabled=1,
                               evictionPolicy='valueOnly',
                               storageBackend=self.bucket_storage)
            self.sleep(10)
            rest.flush_bucket(expectedResults['bucket_name'])

        self.checkConfig(self.eventID, self.master, expectedResults,
                         self.disable_hostname_verification)
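
For reference, RestConnection.create_bucket drives ns_server's bucket-management REST endpoint. A minimal sketch of an equivalent raw call with the requests library follows; the host, credentials, and exact parameter set are illustrative, and maxTTL is only honored by server versions that support bucket TTL.

import requests

def create_bucket_raw(host, user, password, name, ram_mb, max_ttl):
    # POST to the Couchbase bucket-management endpoint; parameter names
    # follow the documented REST API for bucket creation.
    resp = requests.post(
        "http://{0}:8091/pools/default/buckets".format(host),
        auth=(user, password),
        data={
            "name": name,
            "ramQuotaMB": ram_mb,
            "bucketType": "membase",
            "replicaNumber": 1,
            "flushEnabled": 0,
            "maxTTL": max_ttl,
        },
    )
    resp.raise_for_status()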
Code example #12
class SecondaryIndexingClusterOpsTests(BaseSecondaryIndexingTests):

    def setUp(self):
        super(SecondaryIndexingClusterOpsTests, self).setUp()
        server = self.get_nodes_from_services_map(service_type = "n1ql")
        self.rest = RestConnection(server)

    def tearDown(self):
        super(SecondaryIndexingClusterOpsTests, self).tearDown()

    def test_remove_bucket_and_query(self):
        # Initialization operation
        self.run_multi_operations(buckets=self.buckets,
            query_definitions=self.query_definitions,
            create_index=self.run_create_index, drop_index=False,
            query_with_explain=self.run_query_with_explain, query=self.run_query)
        # Delete the buckets
        for bucket in self.buckets:
            self.rest.delete_bucket(bucket.name)
        # Verify the indexes are absent
        self.verify_index_absence(query_definitions=self.query_definitions, buckets=self.buckets)

    def test_change_bucket_properties(self):
        # Initialization operation
        self.run_multi_operations(buckets = self.buckets,
            query_definitions = self.query_definitions,
            create_index = True, drop_index = False,
            query_with_explain = True, query = True)

        #Change Bucket Properties
        for bucket in self.buckets:
            self.rest.change_bucket_props(bucket,
                      ramQuotaMB=None,
                      authType=None,
                      saslPassword=None,
                      replicaNumber=0,
                      proxyPort=None,
                      replicaIndex=None,
                      flushEnabled=False)

        #Run query and query explain
        self.run_multi_operations(buckets = self.buckets,
            query_definitions = self.query_definitions,
            create_index = False, drop_index = True,
            query_with_explain = True, query = True)

    def test_flush_bucket_and_query(self):
        #Initialization operation
        self.run_multi_operations(buckets = self.buckets,
            query_definitions = self.query_definitions,
            create_index = True, drop_index = False,
            query_with_explain = True, query = True)
        # Flush the buckets
        for bucket in self.buckets:
            self.rest.flush_bucket(bucket.name)
        self.sleep(2)
        # Query the buckets, expecting empty result sets
        self.multi_query_using_index_with_empty_result(query_definitions = self.query_definitions,
             buckets = self.buckets)

    def test_delete_create_bucket_and_query(self):
        # Initialization operation
        self.run_multi_operations(buckets=self.buckets,
            query_definitions=self.query_definitions,
            create_index=self.run_create_index, drop_index=False,
            query_with_explain=self.run_query_with_explain, query=self.run_query)
        # Delete the buckets
        for bucket in self.buckets:
            self.rest.delete_bucket(bucket.name)
        self.sleep(2)
        # Recreate the buckets
        self._bucket_creation()
        self.sleep(2)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
        # Verify the indexes are absent
        self.verify_index_absence(query_definitions=self.query_definitions, buckets=self.buckets)
        index_map = self.get_index_stats()
        self.assertTrue(len(index_map) == 0, "Index Stats still show {0}".format(index_map))

    def test_data_loss(self):
        #Initialization operation
        self.run_multi_operations(buckets = self.buckets,
            query_definitions = self.query_definitions,
            create_index = True, drop_index = False,
            query_with_explain = False, query = False)
        self._verify_bucket_count_with_index_count()
        try:
            servr_out = self.servers[1:self.nodes_init]
            failover_task = self.cluster.async_failover([self.master],
                        failover_nodes = servr_out, graceful=False)
            failover_task.result()
            rebalance = self.cluster.async_rebalance(self.servers[:1],
                                    [], servr_out)
            rebalance.result()
            # get the items in the index and check if the data loss is reflected correctly
            self.sleep(2)
        except Exception:
            raise
        finally:
            pass
Code example #13
class SecondaryIndexingClusterOpsTests(BaseSecondaryIndexingTests):

    def setUp(self):
        super(SecondaryIndexingClusterOpsTests, self).setUp()
        server = self.get_nodes_from_services_map(service_type = "n1ql")
        self.rest = RestConnection(server)

    def tearDown(self):
        super(SecondaryIndexingClusterOpsTests, self).tearDown()

    def test_remove_bucket_and_query(self):
        # Initialization operation
        self.run_multi_operations(buckets=self.buckets,
            query_definitions=self.query_definitions,
            create_index=True, drop_index=False,
            query_with_explain=self.run_query_with_explain, query=self.run_query)
        # Delete the buckets
        for bucket in self.buckets:
            self.rest.delete_bucket(bucket.name)
        # Verify the indexes are absent
        self.verify_index_absence(query_definitions=self.query_definitions, buckets=self.buckets)

    def test_change_bucket_properties(self):
        # Initialization operation
        self.run_multi_operations(buckets = self.buckets,
            query_definitions = self.query_definitions,
            create_index = True, drop_index = False,
            query_with_explain = True, query = True)

        #Change Bucket Properties
        for bucket in self.buckets:
            self.rest.change_bucket_props(bucket,
                      ramQuotaMB=None,
                      authType=None,
                      saslPassword=None,
                      replicaNumber=0,
                      proxyPort=None,
                      replicaIndex=None,
                      flushEnabled=False)

        #Run query and query explain
        self.run_multi_operations(buckets = self.buckets,
            query_definitions = self.query_definitions,
            create_index = False, drop_index = True,
            query_with_explain = True, query = True)

    def test_flush_bucket_and_query(self):
        #Initialization operation
        self.run_multi_operations(buckets = self.buckets,
            query_definitions = self.query_definitions,
            create_index = True, drop_index = False,
            query_with_explain = True, query = True)
        # Flush the buckets
        for bucket in self.buckets:
            self.rest.flush_bucket(bucket.name)
        self.sleep(2)
        # Query the buckets, expecting empty result sets
        self.multi_query_using_index_with_emptyresult(query_definitions = self.query_definitions,
             buckets = self.buckets)

    def test_delete_create_bucket_and_query(self):
        # Initialization operation
        self.run_multi_operations(buckets=self.buckets,
            query_definitions=self.query_definitions,
            create_index=True, drop_index=False,
            query_with_explain=self.run_query_with_explain, query=self.run_query)
        # Delete the buckets
        for bucket in self.buckets:
            self.rest.delete_bucket(bucket.name)
        self.sleep(2)
        # Recreate the buckets
        self._bucket_creation()
        self.sleep(2)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
        # Verify the indexes are absent
        self.verify_index_absence(query_definitions=self.query_definitions, buckets=self.buckets)
        index_map = self.get_index_stats()
        self.assertTrue(len(index_map) == 0, "Index Stats still show {0}".format(index_map))

    def test_data_loss(self):
        #Initialization operation
        self.run_multi_operations(buckets = self.buckets,
            query_definitions = self.query_definitions,
            create_index = True, drop_index = False,
            query_with_explain = False, query = False)
        self._verify_bucket_count_with_index_count()
        try:
            servr_out = self.servers[1:self.nodes_init]
            failover_task = self.cluster.async_failover([self.master],
                        failover_nodes = servr_out, graceful=False)
            failover_task.result()
            rebalance = self.cluster.async_rebalance(self.servers[:1],
                                    [], servr_out)
            rebalance.result()
            # get the items in the index and check if the data loss is reflected correctly
            self.sleep(2)
        except Exception:
            raise
        finally:
            pass
Code example #14
File: cluster_ops_2i.py Project: rayleyva/testrunner
class SecondaryIndexingClusterOpsTests(BaseSecondaryIndexingTests):
    def setUp(self):
        super(SecondaryIndexingClusterOpsTests, self).setUp()
        server = self.get_nodes_from_services_map(service_type="n1ql")
        self.rest = RestConnection(server)

    def tearDown(self):
        super(SecondaryIndexingClusterOpsTests, self).tearDown()

    def test_remove_bucket_and_query(self):
        #Initialization operation
        self.run_multi_operations(
            buckets=self.buckets,
            query_definitions=self.query_definitions,
            create_index=self.run_create_index,
            drop_index=False,
            query_with_explain=self.run_query_with_explain,
            query=self.run_query)
        # Delete the buckets
        for bucket in self.buckets:
            self.rest.delete_bucket(bucket.name)
        # Verify the indexes are absent
        self.verify_index_absence(query_definitions=self.query_definitions,
                                  buckets=self.buckets)

    def test_change_bucket_properties(self):
        #Initialization operation
        self.run_multi_operations(buckets=self.buckets,
                                  query_definitions=self.query_definitions,
                                  create_index=True,
                                  drop_index=False,
                                  query_with_explain=True,
                                  query=True)

        #Change Bucket Properties
        for bucket in self.buckets:
            self.rest.change_bucket_props(bucket,
                                          ramQuotaMB=None,
                                          authType=None,
                                          saslPassword=None,
                                          replicaNumber=0,
                                          proxyPort=None,
                                          replicaIndex=None,
                                          flushEnabled=False)

        #Run query and query explain
        self.run_multi_operations(buckets=self.buckets,
                                  query_definitions=self.query_definitions,
                                  create_index=False,
                                  drop_index=True,
                                  query_with_explain=True,
                                  query=True)

    def test_flush_bucket_and_query(self):
        #Initialization operation
        self.run_multi_operations(buckets=self.buckets,
                                  query_definitions=self.query_definitions,
                                  create_index=True,
                                  drop_index=False,
                                  query_with_explain=True,
                                  query=True)
        # Flush the buckets
        for bucket in self.buckets:
            self.rest.flush_bucket(bucket.name)
        self.sleep(60)
        # Query the buckets, expecting empty result sets
        self.multi_query_using_index_with_emptyresult(
            query_definitions=self.query_definitions, buckets=self.buckets)

    def test_delete_create_bucket_and_query(self):
        #Initialization operation
        self.run_multi_operations(
            buckets=self.buckets,
            query_definitions=self.query_definitions,
            create_index=self.run_create_index,
            drop_index=False,
            query_with_explain=self.run_query_with_explain,
            query=self.run_query)
        # Delete the buckets
        for bucket in self.buckets:
            self.rest.delete_bucket(bucket.name)
        self.sleep(60)
        # Recreate the buckets
        self._bucket_creation()
        self.sleep(60)
        # Query the buckets, expecting empty result sets
        self.multi_query_using_index_with_emptyresult(
            query_definitions=self.query_definitions, buckets=self.buckets)
        # Verify the indexes are absent
        self.verify_index_absence(query_definitions=self.query_definitions,
                                  buckets=self.buckets)