Example #1
    def create_user(self):
        rest = RestConnection(self.host)
        response = rest.add_set_builtin_user(self.user_id, self.payload)
        return response
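For reference, a minimal sketch of how this helper is typically driven; the server object, user name, and payload below are illustrative placeholders, with the payload format borrowed from the later examples on this page:

    # Hypothetical usage (values are placeholders, not from this snippet)
    rest = RestConnection(server)
    payload = "name=cbadminbucket&roles=admin&password=password"
    response = rest.add_set_builtin_user("cbadminbucket", payload)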
Example #2
    def setUp(self):
        super(OnPremBaseTest, self).setUp()

        # Framework specific parameters (Extension from cb_basetest)
        self.skip_cluster_reset = self.input.param("skip_cluster_reset", False)
        self.skip_setup_cleanup = self.input.param("skip_setup_cleanup", False)
        # End of framework parameters

        # Cluster level info settings
        self.log_info = self.input.param("log_info", None)
        self.log_location = self.input.param("log_location", None)
        self.stat_info = self.input.param("stat_info", None)
        self.port = self.input.param("port", None)
        self.port_info = self.input.param("port_info", None)
        self.servers = self.input.servers
        self.num_servers = self.input.param("servers", len(self.servers))
        self.vbuckets = self.input.param("vbuckets", CbServer.total_vbuckets)
        self.gsi_type = self.input.param("gsi_type", 'plasma')
        # Memory quota settings
        # Max memory quota to utilize per node
        self.quota_percent = self.input.param("quota_percent", 100)
        # Services' RAM quota to set on cluster
        self.kv_mem_quota_percent = self.input.param("kv_quota_percent", None)
        self.index_mem_quota_percent = \
            self.input.param("index_quota_percent", None)
        self.fts_mem_quota_percent = \
            self.input.param("fts_quota_percent", None)
        self.cbas_mem_quota_percent = \
            self.input.param("cbas_quota_percent", None)
        self.eventing_mem_quota_percent = \
            self.input.param("eventing_quota_percent", None)
        # CBAS setting
        self.jre_path = self.input.param("jre_path", None)
        self.enable_dp = self.input.param("enable_dp", False)
        # End of cluster info parameters

        # Bucket specific params
        # Note: Overriding bucket_eviction_policy from CouchbaseBaseTest
        self.bucket_eviction_policy = \
            self.input.param("bucket_eviction_policy",
                             Bucket.EvictionPolicy.VALUE_ONLY)
        self.bucket_replica_index = self.input.param("bucket_replica_index", 1)
        if self.bucket_storage == Bucket.StorageBackend.magma:
            self.bucket_eviction_policy = Bucket.EvictionPolicy.FULL_EVICTION
        # End of bucket parameters

        self.services_in = self.input.param("services_in", None)
        self.forceEject = self.input.param("forceEject", False)
        self.wait_timeout = self.input.param("wait_timeout", 120)
        self.verify_unacked_bytes = \
            self.input.param("verify_unacked_bytes", False)
        self.disabled_consistent_view = \
            self.input.param("disabled_consistent_view", None)
        self.rebalanceIndexWaitingDisabled = \
            self.input.param("rebalanceIndexWaitingDisabled", None)
        self.rebalanceIndexPausingDisabled = \
            self.input.param("rebalanceIndexPausingDisabled", None)
        self.maxParallelIndexers = \
            self.input.param("maxParallelIndexers", None)
        self.maxParallelReplicaIndexers = \
            self.input.param("maxParallelReplicaIndexers", None)
        self.use_https = self.input.param("use_https", False)
        self.enforce_tls = self.input.param("enforce_tls", False)
        self.ipv4_only = self.input.param("ipv4_only", False)
        self.ipv6_only = self.input.param("ipv6_only", False)
        self.multiple_ca = self.input.param("multiple_ca", False)
        if self.use_https:
            CbServer.use_https = True
            trust_all_certs()

        self.node_utils.cleanup_pcaps(self.servers)
        self.collect_pcaps = self.input.param("collect_pcaps", False)
        if self.collect_pcaps:
            self.node_utils.start_collect_pcaps(self.servers)
        '''
        Use this flag with caution: it is intended only for stand-alone
        tests. When reproducing bugs, stop_server_on_crash stops the server
        as soon as a crash is seen, so that data/logs/dumps can be collected
        at the right time.
        '''
        self.stop_server_on_crash = self.input.param("stop_server_on_crash",
                                                     False)
        self.collect_data = self.input.param("collect_data", False)
        self.validate_system_event_logs = \
            self.input.param("validate_sys_event_logs", False)

        self.nonroot = False
        self.crash_warning = self.input.param("crash_warning", False)

        # Populate memcached_port in case of cluster_run
        cluster_run_base_port = ClusterRun.port
        if int(self.input.servers[0].port) == ClusterRun.port:
            for server in self.input.servers:
                server.port = cluster_run_base_port
                cluster_run_base_port += 1
                # If not defined in node.ini under 'memcached_port' section
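                # Illustration (assuming base ports of 9000 for cluster_run
                # REST and 12000 for memcached): the node on port 9001 gets
                # memcached_port 12000 + 2 * (9001 - 9000) = 12002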
                if server.memcached_port == CbServer.memcached_port:
                    server.memcached_port = \
                        ClusterRun.memcached_port \
                        + (2 * (int(server.port) - ClusterRun.port))

        self.log_setup_status(self.__class__.__name__, "started")
        cluster_name_format = "C%s"
        default_cluster_index = counter_index = 1
        if len(self.input.clusters) > 1:
            # Multi cluster setup
            for _, nodes in self.input.clusters.items():
                cluster_name = cluster_name_format % counter_index
                tem_cluster = CBCluster(name=cluster_name,
                                        servers=nodes,
                                        vbuckets=self.vbuckets)
                self.cb_clusters[cluster_name] = tem_cluster
                counter_index += 1
        else:
            # Single cluster
            cluster_name = cluster_name_format % counter_index
            self.cb_clusters[cluster_name] = CBCluster(name=cluster_name,
                                                       servers=self.servers,
                                                       vbuckets=self.vbuckets)

        # Initialize self.cluster with first available cluster as default
        self.cluster = self.cb_clusters[cluster_name_format %
                                        default_cluster_index]
        self.cluster_util = ClusterUtils(self.task_manager)
        self.bucket_util = BucketUtils(self.cluster_util, self.task)

        CbServer.enterprise_edition = \
            self.cluster_util.is_enterprise_edition(self.cluster)
        if CbServer.enterprise_edition:
            self.cluster.edition = "enterprise"
        else:
            self.cluster.edition = "community"

        if self.standard_buckets > 10:
            self.bucket_util.change_max_buckets(self.cluster.master,
                                                self.standard_buckets)

        for cluster_name, cluster in self.cb_clusters.items():
            # Append initial master node to the nodes_in_cluster list
            cluster.nodes_in_cluster.append(cluster.master)

            shell = RemoteMachineShellConnection(cluster.master)
            self.os_info = shell.extract_remote_info().type.lower()
            if self.os_info != 'windows':
                if cluster.master.ssh_username != "root":
                    self.nonroot = True
                    shell.disconnect()
                    break
            shell.disconnect()

        self.log_setup_status("OnPremBaseTest", "started")
        try:
            # Construct dict of mem. quota percent per service
            mem_quota_percent = dict()
            if self.kv_mem_quota_percent:
                mem_quota_percent[CbServer.Services.KV] = \
                    self.kv_mem_quota_percent
            if self.index_mem_quota_percent:
                mem_quota_percent[CbServer.Services.INDEX] = \
                    self.index_mem_quota_percent
            if self.cbas_mem_quota_percent:
                mem_quota_percent[CbServer.Services.CBAS] = \
                    self.cbas_mem_quota_percent
            if self.fts_mem_quota_percent:
                mem_quota_percent[CbServer.Services.FTS] = \
                    self.fts_mem_quota_percent
            if self.eventing_mem_quota_percent:
                mem_quota_percent[CbServer.Services.EVENTING] = \
                    self.eventing_mem_quota_percent

            if not mem_quota_percent:
                mem_quota_percent = None

            if self.skip_setup_cleanup:
                # Update current server/service map and buckets for the cluster
                for _, cluster in self.cb_clusters.items():
                    self.cluster_util.update_cluster_nodes_service_list(
                        cluster)
                    cluster.buckets = self.bucket_util.get_all_buckets(cluster)
                return
            else:
                for cluster_name, cluster in self.cb_clusters.items():
                    self.log.info("Delete all buckets and rebalance out "
                                  "other nodes from '%s'" % cluster_name)
                    self.cluster_util.cluster_cleanup(cluster,
                                                      self.bucket_util)

            reload(Cb_constants)

            # Avoid cleanup if the previous test was already torn down cleanly
            if self.case_number == 1 or self.case_number > 1000:
                if self.case_number > 1000:
                    self.log.warn("TearDown for prev test failed. Will retry")
                    self.case_number -= 1000
                self.tearDownEverything(reset_cluster_env_vars=False)

            for cluster_name, cluster in self.cb_clusters.items():
                if not self.skip_cluster_reset:
                    self.initialize_cluster(
                        cluster_name,
                        cluster,
                        services=None,
                        services_mem_quota_percent=mem_quota_percent)

                # Update initial service map for the master node
                self.cluster_util.update_cluster_nodes_service_list(cluster)

                # Set this unconditionally to allow small (256 MB) Magma
                # buckets in test environments
                RestConnection(cluster.master).set_internalSetting(
                    "magmaMinMemoryQuota", 256)

            # Enable dp_version since we need collections enabled
            if self.enable_dp:
                tasks = []
                for server in self.cluster.servers:
                    task = self.node_utils.async_enable_dp(server)
                    tasks.append(task)
                for task in tasks:
                    self.task_manager.get_task_result(task)

            # Enforce tls on nodes of all clusters
            if self.use_https and self.enforce_tls:
                for _, cluster in self.cb_clusters.items():
                    tasks = []
                    for node in cluster.servers:
                        task = self.node_utils.async_enable_tls(node)
                        tasks.append(task)
                    for task in tasks:
                        self.task_manager.get_task_result(task)
                    self.log.info(
                        "Validating that services obey TLS-only on servers "
                        "{0}".format(cluster.servers))
                    status = self.cluster_util.check_if_services_obey_tls(
                        cluster.servers)
                    if not status:
                        self.fail("Services did not honor TLS enforcement")

            # Enforce IPv4 or IPv6 or both
            if self.ipv4_only or self.ipv6_only:
                for _, cluster in self.cb_clusters.items():
                    status, msg = self.cluster_util.enable_disable_ip_address_family_type(
                        cluster, True, self.ipv4_only, self.ipv6_only)
                    if not status:
                        self.fail(msg)

            self.standard = self.input.param("standard", "pkcs8")
            self.passphrase_type = self.input.param("passphrase_type",
                                                    "script")
            self.encryption_type = self.input.param("encryption_type",
                                                    "aes256")
            if self.multiple_ca:
                for _, cluster in self.cb_clusters.items():
                    cluster.x509 = x509main(
                        host=cluster.master,
                        standard=self.standard,
                        encryption_type=self.encryption_type,
                        passphrase_type=self.passphrase_type)
                    self.generate_and_upload_cert(cluster.servers,
                                                  cluster.x509,
                                                  upload_root_certs=True,
                                                  upload_node_certs=True,
                                                  upload_client_certs=True)
                    payload = "name=cbadminbucket&roles=admin&password=password"
                    rest = RestConnection(cluster.master)
                    rest.add_set_builtin_user("cbadminbucket", payload)

            for cluster_name, cluster in self.cb_clusters.items():
                self.modify_cluster_settings(cluster)

            self.__log("started")
        except Exception as e:
            traceback.print_exc()
            self.task.shutdown(force=True)
            self.fail(e)
        finally:
            # Track test start time only if we need system log validation
            if self.validate_system_event_logs:
                self.system_events.set_test_start_time()

            self.log_setup_status("OnPremBaseTest", "finished")
Example #3
class LimitTest(ClusterSetup):
    def setUp(self):
        super(LimitTest, self).setUp()
        self.no_of_users = self.input.param("users", 3)

        # The resource we're testing
        self.resource_name = self.input.param(
            "resource_name", "ns_server_num_concurrent_requests")

        # The threshold at which the limit is configured at
        self.resource_limit = self.input.param("resource_limit", 100)

        # Extra resources - a list of key:value pairs separated by dashes
        self.extra_resources = self.input.param("extra_resources", "")
        self.extra_resources = dict(
            item.split(":") for item in self.extra_resources.split("-")
            if item)
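        # e.g. the (made-up) param value "num_buckets:2-num_scopes:4" parses
        # to {"num_buckets": "2", "num_scopes": "4"}; values remain strings,
        # hence the int() casts where the limits are applied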

        # The absolute error that is allowed in validation
        self.error = self.input.param("error", 5)

        # The difference between throughput changes
        self.throughput_difference = self.input.param("throughput_difference",
                                                      50)

        # The retry timeout
        self.retry_timeout = self.input.param("retry_timeout", 5)

        # A multiplier for the throughput producer (e.g. use 1 << 20 for
        # mebibytes)
        self.units = self.input.param("units", 1 << 20)

        # A rest connection
        self.rest = RestConnection(self.cluster.master)

        # The name of the bucket
        self.bucket_name = "default"

        # Create bucket
        self.fragmentationPercentage = self.input.param(
            "fragmentationPercentage", False)
        bucket_params = Bucket({
            "name": self.bucket_name,
            "fragmentationPercentage": self.fragmentationPercentage
        })
        self.bucket_util.create_bucket(self.cluster, bucket_params)

        # Create users
        self.users = self.create_users()

        # Some hints for controlling the user resource tasks
        self.hints = {}

        # A streaming uri for http long polling
        self.hints["streaminguri"] = RestConnection(
            self.cluster.master).get_path("pools/default/buckets/default",
                                          "streamingUri")

        # A factory-esque object that produces objects for the given resource
        self.resource_producer = UserResourceProducer(self.resource_name,
                                                      self.hints)

        # Create throughput tasks from test params
        self.tasks = self.create_tasks()

    def create_users(self):
        """ Creates users """
        users = [User("user{}".format(i)) for i in range(self.no_of_users)]
        for user in users:
            self.rest.add_set_builtin_user(user.username, user.params())
        return users

    def create_tasks(self):
        """ Creates resource tasks """
        tasks = []
        for user in self.users:
            for node in self.cluster.servers[:self.nodes_init]:
                tasks.append(
                    self.resource_producer.get_resource_task(
                        user, copy_node_for_user(user, node)))
        return tasks

    def run_compaction(self):
        if self.fragmentationPercentage:
            self.bucket_util._run_compaction(self.cluster, number_of_times=2)

    def remove_user_and_task(self, user):
        """ Removes a user and their associated task """
        self.users.remove(user)
        self.tasks = [task for task in self.tasks if task.user != user]
        self.rest.delete_builtin_user(user.username)

    def insert_user_and_task(self, user):
        """ Inserts a user and their associated task """
        self.users.append(user)
        self.rest.add_set_builtin_user(user.username, user.params())

        for node in self.cluster.servers[:self.nodes_init]:
            self.tasks.append(
                self.resource_producer.get_resource_task(
                    user, copy_node_for_user(user, node)))

    def get_stats(self, throughput, above=True):
        nodes_in_server = self.cluster_util.get_nodes_in_cluster(self.cluster)
        self.resource_producer.get_resource_stat_monitor(
            self.users, nodes_in_server, throughput, above)

    def set_throughput_to_zero(self):
        """ Set the throughput for all tasks to 0 """
        for task in self.tasks:
            task.set_throughput(0)

    def tearDown(self):
        super(LimitTest, self).tearDown()

    def enforce(self):
        """ Make effects take place """
        self.rest.enforce_limits()

    def set_limits_for_all_users(self, resource_name, resource_limit):
        """ Sets limits for all users and makes them take effect """
        # Configure limits from the test params
        limit_config = LimitConfig()
        limit_config.set_limit(resource_name, resource_limit)

        # Configure extra limits
        for resource_name, resource_limit in self.extra_resources.items():
            limit_config.set_limit(resource_name, int(resource_limit))

        # Set user limits
        for user in self.users:
            self.rest.add_set_builtin_user(
                user.username, user.params(limit_config.get_user_config()))

        # A pattern for matching UUID-4
        pattern = re.compile(
            r"^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
        )
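        # e.g. "123e4567-e89b-42d3-a456-426614174000" matches: the third
        # group starts with "4" (the version) and the fourth with one of
        # "89ab" (the variant bits)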

        # Ensure the user limits were set
        if limit_config.get_user_config():
            for user in self.users:
                content = self.rest.get_builtin_user(user.username)
                # Ensure the user's uuid is a UUID-4
                self.assertTrue(pattern.match(content['uuid']),
                                "The user's uuid does not match a UUID-4")
                self.assertEqual(content['limits'],
                                 limit_config.get_user_config())

        # Set scope limits
        for user in self.users:
            self.rest.set_scope_limit(self.bucket_name, user.scope.name,
                                      limit_config.get_scope_config())

        # Ensure the scope limits were set
        if limit_config.get_scope_config():
            for user in self.users:
                _, content = BucketHelper(
                    self.cluster.master).list_collections(self.bucket_name)
                scope = next(scope for scope in json.loads(content)['scopes']
                             if scope['name'] == user.scope.name)
                self.assertEqual(scope['limits'],
                                 limit_config.get_scope_config())

        self.enforce()

    def test_below_threshold(self):
        """ A test in which the throughput is below the threshold and operations succeed. """
        throughput_difference = self.throughput_difference

        self.set_limits_for_all_users(self.resource_name, self.resource_limit)

        # Check that a throughput below the limit succeeds
        for task in self.tasks:
            task.set_throughput(self.resource_limit * self.units -
                                throughput_difference)

        for task in self.tasks:
            self.get_throughput = task.get_throughput() + self.error
            self.assertTrue(
                retry_with_timeout(
                    self.retry_timeout,
                    lambda: self.check(task.get_throughput_success(),
                                       task.get_throughput(), self.error)))

        self.get_stats(self.get_throughput, False)

        self.set_throughput_to_zero()

    def check(self, lhs, rhs, error):
        self.log.info(
            "Expected:{} Actual:{} Actual Difference:{} Expected Difference:{}"
            .format(lhs, rhs, abs(lhs - rhs), error))
        return abs(lhs - rhs) <= error

    def check_error(self, actual_error, expected_error):
        self.log.info("Expected: {} Actual: {}".format(expected_error,
                                                       actual_error[:20]))
        return actual_error == expected_error

    def test_above_threshold(self):
        """ A test in which the throughput is above the threshold and
        operations exceeding the threshold fail. """
        throughput_difference = self.throughput_difference

        self.set_limits_for_all_users(self.resource_name, self.resource_limit)

        # Check that a throughput above the threshold is constrained by the
        # resource limit
        for task in self.tasks:
            task.set_throughput(self.resource_limit * self.units +
                                throughput_difference)

        for task in self.tasks:
            self.get_throughput = self.resource_limit * self.units
            self.assertTrue(
                retry_with_timeout(
                    self.retry_timeout,
                    lambda: self.check(task.get_throughput_success(),
                                       self.get_throughput, self.error)))

        # Once above threshold, ensure the expected error message is thrown
        if self.tasks and self.tasks[0].expected_error():
            self.assertTrue(
                retry_with_timeout(
                    self.retry_timeout,
                    lambda: self.check_error(self.tasks[0].error(),
                                             self.tasks[0].expected_error())))

        self.run_compaction()

        # set a higher limit to validate stats
        self.set_limits_for_all_users(self.resource_name,
                                      self.resource_limit * 2)

        self.get_stats(self.get_throughput)
        self.set_throughput_to_zero()

    def test_above_to_below_threshold(self):
        """ A test in which the throughput is initially above the threshold and
        operations fail, the throughput is decreased to below the threshold and
        operations succeed. """
        throughput_difference = self.throughput_difference

        self.set_limits_for_all_users(self.resource_name, self.resource_limit)

        # Check that a throughput above the threshold is constrained by the
        # resource limit
        for task in self.tasks:
            task.set_throughput(self.resource_limit * self.units +
                                throughput_difference)

        for task in self.tasks:
            self.assertTrue(
                retry_with_timeout(
                    self.retry_timeout,
                    lambda: self.check(task.get_throughput_success(),
                                       self.resource_limit * self.units,
                                       self.error)))

        self.sleep(5)

        # Check that a throughput below the limit succeeds
        for task in self.tasks:
            task.set_throughput(self.resource_limit * self.units -
                                throughput_difference)

        for task in self.tasks:
            self.assertTrue(
                retry_with_timeout(
                    self.retry_timeout,
                    lambda: self.check(task.get_throughput_success(),
                                       task.get_throughput(), self.error)))

        self.set_throughput_to_zero()

    def test_below_to_above_threshold(self):
        """ A test in which the throughput is initially below the threshold and
        operations succeed, the throughput is increased to above the threshold and
        operations fail. """
        throughput_difference = self.throughput_difference

        self.set_limits_for_all_users(self.resource_name, self.resource_limit)

        # Check that a throughput below the limit succeeds
        for task in self.tasks:
            task.set_throughput(self.resource_limit * self.units -
                                throughput_difference)

        for task in self.tasks:
            self.assertTrue(
                retry_with_timeout(
                    self.retry_timeout,
                    lambda: self.check(task.get_throughput_success(),
                                       task.get_throughput(), self.error)))

        self.sleep(5)

        # Check that a throughput above the threshold is constrained by the
        # resource limit
        for task in self.tasks:
            task.set_throughput(self.resource_limit * self.units +
                                throughput_difference)

        for task in self.tasks:
            self.assertTrue(
                retry_with_timeout(
                    self.retry_timeout,
                    lambda: self.check(task.get_throughput_success(),
                                       self.resource_limit * self.units,
                                       self.error)))

        self.set_throughput_to_zero()

    def test_user_churn(self):
        """ A test in which initial users are deleted to make space for new
        users. Limits are applied to new users. A throughput of above the
        threshold is applied and it is expected for the throughput to remain at
        or below the limit threshold. """
        for i in range(self.input.param("iterations", 5)):
            # Test that throughput remains at or below the threshold
            self.test_above_threshold()
            # Remove the user from the system
            self.remove_user_and_task(self.users[-1])
            # Add a new user to the system
            self.insert_user_and_task(
                User("user{}".format(len(self.users) + i)))

    def test_cluster_ops(self):
        """ A test in which the limits are configured, a cluser operation
        happens and throughput is produced above the threshold and operations
        exceeding the threshold fail.
        """
        # The number of times the cluster operation happens
        cycles = self.input.param("cycles", 3)
        # The type of cluster operation
        strategy = self.input.param("strategy", "rebalance")

        self.assertGreaterEqual(self.nodes_init, 3,
                                "Requires at least 3 nodes")

        if strategy not in {'rebalance', 'graceful-failover', 'hard-failover'}:
            self.fail(
                "Strategy {} is not an allowed strategy".format(strategy))

        # Apply cluster operation
        apply_rebalance(self.task,
                        self.cluster.servers,
                        cycles=cycles,
                        strategy=strategy)
        # Ensure limits still apply after cluster operation
        self.test_above_threshold()
Example #4
class LWWStatsTests(BaseTestCase):

    # The stats-related cbepctl vbucket commands actually apply to the whole
    # bucket, but we still need a vbucket parameter (which is apparently
    # ignored)
    DUMMY_VBUCKET = ' 123'  # the leading space is needed
    DEFAULT_THRESHOLD = 5000000  # microseconds, i.e. 5 seconds
    ONE_HOUR_IN_SECONDS = 3600

    def setUp(self):
        super(LWWStatsTests, self).setUp()
        self.rest = RestConnection(self.servers[0])

    def tearDown(self):
        super(LWWStatsTests, self).tearDown()

    def test_time_sync_threshold_setting(self):
        '''
        @summary: This method checks for a change in the drift threshold
        settings. We change the drift_ahead_threshold value using REST
        and then verify it against the retrieved value
        '''

        self.log.info('starting test_time_sync_threshold_setting')
        # bucket is created with lww in base test case using the LWW parameter
        # get the stats
        client = MemcachedClientHelper.direct_client(self.servers[0],
                                                     self.buckets[0])
        ahead_threshold = int(
            client.stats()["ep_hlc_drift_ahead_threshold_us"])
        self.assertTrue(ahead_threshold == LWWStatsTests.DEFAULT_THRESHOLD,
                        'Ahead threshold mismatch expected: {0} '\
                        'actual {1}'.format(LWWStatsTests.DEFAULT_THRESHOLD,
                                            ahead_threshold))
        # Change the setting and verify it matches - this may or may not be
        # supported. The REST API accepts the value in milliseconds, so
        # 5000000 us // 2000 = 2500 ms, which is half the default threshold.
        cmd = "curl -X POST -u Administrator:password -d " \
              "'driftAheadThresholdMs={0}' http://{1}:8091/pools/default/" \
              "buckets/default".format(
                  str(LWWStatsTests.DEFAULT_THRESHOLD // 2000),
                  self.servers[0].ip)
        self.log.info("Executing command: %s" % cmd)
        try:
            os.system(cmd)
        except Exception as err:
            self.fail('Exception occurred: %s' % str(err))
        time.sleep(10)
        cl_stats = client.stats()
        ahead_threshold = int(cl_stats["ep_hlc_drift_ahead_threshold_us"])
        self.assertTrue(
            ahead_threshold == LWWStatsTests.DEFAULT_THRESHOLD // 2,
            'Ahead threshold mismatch expected: {0} actual {1}'.format(
                LWWStatsTests.DEFAULT_THRESHOLD // 2, ahead_threshold))
        # generally need to fill out a matrix here behind/ahead - big and small

    def test_time_sync_threshold_setting_rest_call(self):
        self.log.info('starting test_time_sync_threshold_setting_rest_call')
        # bucket is created with lww in base test case using the LWW parameter
        client = MemcachedClientHelper.direct_client(self.servers[0],
                                                     self.buckets[0])
        rest = RestConnection(self.master)
        self.assertTrue(
            rest.set_cas_drift_threshold(self.buckets[0], 100000, 200000),
            'Unable to set the CAS drift threshold')
        time.sleep(15)  # take a few seconds for the stats to settle in
        stats = client.stats()

        self.assertTrue(
            int(stats['ep_hlc_drift_ahead_threshold_us']) == 100000 * 1000,
            'Ahead threshold incorrect. Expected {0} actual {1}'.format(
                100000 * 1000, stats['ep_hlc_drift_ahead_threshold_us']))

        self.assertTrue(
            int(stats['ep_hlc_drift_behind_threshold_us']) == 200000 * 1000,
            'Behind threshold incorrect. Expected {0} actual {1}'.format(
                200000 * 1000, stats['ep_hlc_drift_behind_threshold_us']))
        # generally need to fill out a matrix here behind/ahead - big and small

    def test_poisoned_cas(self):
        """
        @note:  - set the clock ahead
                - do lots of sets and get some CASs
                - do a set and get the CAS (flag, CAS, value) and save it
                - set the clock back
                - verify the CAS is still big on new sets
                - reset the CAS
                - do the vbucket max cas and verify
                - do a new mutation and verify the CAS is smaller
        """
        # Create a user named after the bucket, with the admin role
        self.log.info('starting test_poisoned_cas')
        payload = "name={0}&roles=admin&password=password".format(
            self.buckets[0].name)
        self.rest.add_set_builtin_user(self.buckets[0].name, payload)
        sdk_client = SDKClient(scheme='couchbase',
                               hosts=[self.servers[0].ip],
                               bucket=self.buckets[0].name)
        mc_client = MemcachedClientHelper.direct_client(
            self.servers[0], self.buckets[0])
        # move the system clock ahead to poison the CAS
        shell = RemoteMachineShellConnection(self.servers[0])
        self.assertTrue(
            shell.change_system_time(LWWStatsTests.ONE_HOUR_IN_SECONDS),
            'Failed to advance the clock')

        output, error = shell.execute_command('date')
        self.log.info('Date after is set forward {0}'.format(output))
        rc = sdk_client.set('key1', 'val1')
        rc = mc_client.get('key1')
        poisoned_cas = rc[1]
        self.log.info('The poisoned CAS is {0}'.format(poisoned_cas))
        # do lots of mutations to set the max CAS for all vbuckets
        gen_load = BlobGenerator('key-for-cas-test',
                                 'value-for-cas-test-',
                                 self.value_size,
                                 end=10000)
        self._load_all_buckets(self.master, gen_load, "create", 0)
        # move the clock back again and verify the CAS stays large
        self.assertTrue(
            shell.change_system_time(-LWWStatsTests.ONE_HOUR_IN_SECONDS),
            'Failed to change the clock')
        output, error = shell.execute_command('date')
        self.log.info('Date after is set backwards {0}'.format(output))
        use_mc_bin_client = self.input.param("use_mc_bin_client", True)

        if use_mc_bin_client:
            rc = mc_client.set('key2', 0, 0, 'val2')
            second_poisoned_cas = rc[1]
        else:
            rc = sdk_client.set('key2', 'val2')
            second_poisoned_cas = rc.cas
        self.log.info(
            'The second_poisoned CAS is {0}'.format(second_poisoned_cas))
        self.assertTrue(
            second_poisoned_cas > poisoned_cas,
            'Second poisoned CAS {0} is not larger than the first poisoned '
            'CAS {1}'.format(second_poisoned_cas, poisoned_cas))
        # Reset the CAS for all vbuckets. This needs to be done in conjunction
        # with a clock change; if the clock is not changed, the CAS will
        # immediately continue tracking the clock. Two scenarios:
        # 1. Set the clock back 1 hour and the CAS back 30 minutes - the CAS
        #    should be used
        # 2. Set the clock back 1 hour and the CAS back 2 hours - the clock
        #    should be used
        # Do case 1: set the CAS back 30 minutes. The calculation below
        # assumes the CAS is in nanoseconds.
        earlier_max_cas = poisoned_cas - 30 * 60 * 1000000000
        for i in range(self.vbuckets):
            output, error = shell.execute_cbepctl(
                self.buckets[0], "", "set_vbucket_param", "max_cas ",
                str(i) + ' ' + str(earlier_max_cas))
            if len(error) > 0:
                self.fail('Failed to set the max cas')
        # verify the max CAS
        for i in range(self.vbuckets):
            max_cas = int(
                mc_client.stats('vbucket-details')['vb_' + str(i) +
                                                   ':max_cas'])
            self.assertTrue(
                max_cas == earlier_max_cas,
                'Max CAS not properly set for vbucket {0} set as {1} and observed {2}'
                .format(i, earlier_max_cas, max_cas))
            self.log.info(
                'Per cbstats the max cas for vbucket {0} is {1}'.format(
                    i, max_cas))

        rc1 = sdk_client.set('key-after-resetting cas', 'val1')
        rc2 = mc_client.get('key-after-resetting cas')
        set_cas_after_reset_max_cas = rc2[1]
        self.log.info(
            'The later CAS is {0}'.format(set_cas_after_reset_max_cas))
        self.assertTrue(
            set_cas_after_reset_max_cas < poisoned_cas,
            'For {0} CAS has not decreased. Current CAS {1} poisoned CAS {2}'.
            format('key-after-resetting cas', set_cas_after_reset_max_cas,
                   poisoned_cas))
        # do a bunch of sets and verify the CAS is small - this is really only one set, need to do more
        gen_load = BlobGenerator('key-for-cas-test-after-cas-is-reset',
                                 'value-for-cas-test-',
                                 self.value_size,
                                 end=1000)
        self._load_all_buckets(self.master, gen_load, "create", 0)
        gen_load.reset()
        while gen_load.has_next():
            key, value = next(gen_load)
            try:
                rc = mc_client.get(key)
                #rc = sdk_client.get(key)
                cas = rc[1]
                self.assertTrue(
                    cas < poisoned_cas,
                    'For key {0} CAS has not decreased. Current CAS {1} poisoned CAS {2}'
                    .format(key, cas, poisoned_cas))
            except Exception:
                self.log.info('get error with {0}'.format(key))

        rc = sdk_client.set('key3', 'val1')
        better_cas = rc.cas
        self.log.info('The better CAS is {0}'.format(better_cas))
        self.assertTrue(better_cas < poisoned_cas, 'The CAS was not improved')
        # Remaining plan for this test:
        # - set the clock way ahead - remote_util_OS.py (new)
        # - do a bunch of mutations - not really needed
        # - do the fix command - cbepctl, the existing way (remote util)
        # - do some mutations and verify they conform to the new CAS; build on
        #   the CAS code, iterating over the keys with the SDK client
        #   (while gen.has_next(): key, value = gen.next()) to get the CAS
        #   for each, and also check the vbucket stats
        # - the result can also be checked in the vbucket stats somewhere
        # - revert the clock

    def test_drift_stats(self):
        '''
        @note: An exercise in filling out the matrix with the right amount of
               code. We want to test (ahead, behind) x (setWithMeta,
               deleteWithMeta) x (active, replica), so for now do the set/del
               in sequence
        '''
        self.log.info('starting test_drift_stats')
        # Create a user named after the bucket, with the admin role
        payload = "name={0}&roles=admin&password=password".format(
            self.buckets[0].name)
        self.rest.add_set_builtin_user(self.buckets[0].name, payload)
        check_ahead_threshold = self.input.param("check_ahead_threshold", True)

        self.log.info(
            'Checking the ahead threshold? {0}'.format(check_ahead_threshold))

        sdk_client = SDKClient(scheme='couchbase',
                               hosts=[self.servers[0].ip],
                               bucket=self.buckets[0].name)
        mc_client = MemcachedClientHelper.direct_client(
            self.servers[0], self.buckets[0])
        shell = RemoteMachineShellConnection(self.servers[0])

        # get the current time
        rc = sdk_client.set('key1', 'val1')
        current_time_cas = rc.cas

        test_key = 'test-set-with-metaxxxx'
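        # Map the key to its vbucket the same way the client SDKs do: CRC32
        # of the key, shifted/masked down to 15 bits, then reduced modulo
        # the vbucket count (a power of two, hence the final "&")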
        vbId = (((zlib.crc32(test_key.encode())) >> 16)
                & 0x7fff) & (self.vbuckets - 1)

        # Verify the case where we are within the threshold: do a set and a
        # del; neither should trigger the threshold-exceeded counters
        #mc_active.setWithMeta(key, '123456789', 0, 0, 123, cas)
        rc = mc_client.setWithMeta(test_key, 'test-value', 0, 0, 1,
                                   current_time_cas)
        #rc = mc_client.setWithMetaLWW(test_key, 'test-value', 0, 0, current_time_cas)
        #rc = mc_client.delWithMetaLWW(test_key, 0, 0, current_time_cas+1)

        vbucket_stats = mc_client.stats('vbucket-details')
        ahead_exceeded = int(vbucket_stats['vb_' + str(vbId) +
                                           ':drift_ahead_threshold_exceeded'])
        self.assertTrue(
            ahead_exceeded == 0,
            'Ahead exceeded expected is 0 but is {0}'.format(ahead_exceeded))
        behind_exceeded = int(
            vbucket_stats['vb_' + str(vbId) +
                          ':drift_behind_threshold_exceeded'])
        self.assertTrue(
            behind_exceeded == 0,
            'Behind exceeded expected is 0 but is {0}'.format(behind_exceeded))
        # out of curiosity, log the total counts
        self.log.info(
            'Total stats: total abs drift {0} and total abs drift count {1}'.
            format(vbucket_stats['vb_' + str(vbId) + ':total_abs_drift'],
                   vbucket_stats['vb_' + str(vbId) +
                                 ':total_abs_drift_count']))

        # do the ahead set with meta case - verify: ahead threshold exceeded, total_abs_drift count and abs_drift
        if check_ahead_threshold:
            stat_descriptor = 'ahead'
            cas = current_time_cas + 5000 * LWWStatsTests.DEFAULT_THRESHOLD
        else:
            stat_descriptor = 'behind'
            cas = current_time_cas - (5000 * LWWStatsTests.DEFAULT_THRESHOLD)
        rc = mc_client.setWithMeta(test_key, 'test-value', 0, 0, 0, cas)
        #rc = mc_client.delWithMetaLWW(test_key, 0, 0, cas+1)
        # verify the vbucket stats
        vbucket_stats = mc_client.stats('vbucket-details')
        drift_counter_stat = 'vb_' + str(
            vbId) + ':drift_' + stat_descriptor + '_threshold_exceeded'
        threshold_exceeded = int(
            mc_client.stats('vbucket-details')[drift_counter_stat])
        # MB-21450 self.assertTrue( ahead_exceeded == 2, '{0} exceeded expected is 1 but is {1}'.
        # format( stat_descriptor, threshold_exceeded))

        self.log.info(
            'Total stats: total abs drift {0} and total abs drift count {1}'.
            format(vbucket_stats['vb_' + str(vbId) + ':total_abs_drift'],
                   vbucket_stats['vb_' + str(vbId) +
                                 ':total_abs_drift_count']))

        # and verify the bucket stats: ep_active_hlc_drift_count, ep_clock_cas_drift_threshold_exceeded,
        # ep_active_hlc_drift
        bucket_stats = mc_client.stats()
        ep_active_hlc_drift_count = int(
            bucket_stats['ep_active_hlc_drift_count'])
        ep_clock_cas_drift_threshold_exceeded = int(
            bucket_stats['ep_clock_cas_drift_threshold_exceeded'])
        ep_active_hlc_drift = int(bucket_stats['ep_active_hlc_drift'])

        # Drift count appears to be the number of mutations
        self.assertTrue(
            ep_active_hlc_drift_count > 0,
            'ep_active_hlc_drift_count is 0, expected a positive value')

        # drift itself is the sum of the absolute values of all drifts, so check that it is greater than 0
        self.assertTrue(ep_active_hlc_drift > 0,
                        'ep_active_hlc_drift is 0, expected a positive value')

        # the actual drift count is a little more granular
        expected_drift_threshold_exceed_count = 1
        self.assertTrue(
            expected_drift_threshold_exceed_count ==
            ep_clock_cas_drift_threshold_exceeded,
            'ep_clock_cas_drift_threshold_exceeded is incorrect. Expected {0}, actual {1}'
            .format(expected_drift_threshold_exceed_count,
                    ep_clock_cas_drift_threshold_exceeded))

    def test_logical_clock_ticks(self):
        self.log.info('starting test_logical_clock_ticks')

        payload = "name={0}&roles=admin&password=password".format(
            self.buckets[0].name)
        self.rest.add_set_builtin_user(self.buckets[0].name, payload)
        sdk_client = SDKClient(scheme='couchbase',
                               hosts=[self.servers[0].ip],
                               bucket=self.buckets[0].name)
        mc_client = MemcachedClientHelper.direct_client(
            self.servers[0], self.buckets[0])
        shell = RemoteMachineShellConnection(self.servers[0])

        # do a bunch of mutations to set the max cas
        gen_load = BlobGenerator('key-for-cas-test-logical-ticks',
                                 'value-for-cas-test-',
                                 self.value_size,
                                 end=10000)
        self._load_all_buckets(self.master, gen_load, "create", 0)

        vbucket_stats = mc_client.stats('vbucket-details')
        base_total_logical_clock_ticks = 0
        for i in range(self.vbuckets):
            base_total_logical_clock_ticks = base_total_logical_clock_ticks + int(
                vbucket_stats['vb_' + str(i) + ':logical_clock_ticks'])
        self.log.info('The base total logical clock ticks is {0}'.format(
            base_total_logical_clock_ticks))

        # move the system clock back so the logical counter part of HLC is used and the logical clock ticks
        # stat is incremented
        self.assertTrue(
            shell.change_system_time(-LWWStatsTests.ONE_HOUR_IN_SECONDS),
            'Failed to set the clock back')

        # do more mutations
        NUMBER_OF_MUTATIONS = 10000
        gen_load = BlobGenerator('key-for-cas-test-logical-ticks',
                                 'value-for-cas-test-',
                                 self.value_size,
                                 end=NUMBER_OF_MUTATIONS)
        self._load_all_buckets(self.master, gen_load, "create", 0)

        time.sleep(30)  # give the stats time to settle before sampling them
        vbucket_stats = mc_client.stats('vbucket-details')
        total_logical_clock_ticks = 0
        for i in range(self.vbuckets):
            total_logical_clock_ticks = total_logical_clock_ticks + int(
                vbucket_stats['vb_' + str(i) + ':logical_clock_ticks'])

        self.log.info('The total logical clock ticks is {0}'.format(
            total_logical_clock_ticks))

        self.assertTrue(
            total_logical_clock_ticks -
            base_total_logical_clock_ticks == NUMBER_OF_MUTATIONS,
            'Expected clock tick {0} actual {1}'.format(
                NUMBER_OF_MUTATIONS,
                total_logical_clock_ticks - base_total_logical_clock_ticks))