Example #1
    def create_required_buckets(self):
        self.log.info("Get the available memory quota")
        bucket_util = bucket_utils(self.master)
        self.info = bucket_util.rest.get_nodes_self()
        threshold_memory = 1024
        total_memory_in_mb = self.info.memoryFree / 1024**2
        total_available_memory_in_mb = total_memory_in_mb
        active_service = self.info.services

        if "index" in active_service:
            total_available_memory_in_mb -= self.info.indexMemoryQuota
        if "fts" in active_service:
            total_available_memory_in_mb -= self.info.ftsMemoryQuota
        if "cbas" in active_service:
            total_available_memory_in_mb -= self.info.cbasMemoryQuota
        if "eventing" in active_service:
            total_available_memory_in_mb -= self.info.eventingMemoryQuota

        available_memory = total_available_memory_in_mb - threshold_memory
        self.rest.set_service_mem_quota({
            CbServer.Settings.KV_MEM_QUOTA:
            available_memory,
            CbServer.Settings.CBAS_MEM_QUOTA:
            available_memory - 1024,
            CbServer.Settings.INDEX_MEM_QUOTA:
            available_memory - 1024
        })

        self.log.info("Create CB buckets")
        self.create_bucket(self.master,
                           "GleambookUsers",
                           bucket_ram=available_memory)
        shell = RemoteMachineShellConnection(self.master)
        # Target the node the shell is connected to instead of a hardcoded IP
        command = 'curl -i -u Administrator:password --data \'ns_bucket:update_bucket_props("GleambookUsers", [{extra_config_string, "cursor_dropping_upper_mark=70;cursor_dropping_lower_mark=50"}]).\' http://%s:8091/diag/eval' % self.master.ip
        shell.execute_command(command)

        result = RestConnection(self.query_node).query_tool(
            "CREATE PRIMARY INDEX idx_GleambookUsers ON GleambookUsers;")
        self.sleep(10, "wait for index creation.")
        self.assertTrue(result['status'] == "success")
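The quota arithmetic above reduces to one formula: free memory converted to MB, minus the quota of every non-KV service, minus a fixed reserve. A minimal standalone sketch of that computation (the helper name and sample figures are hypothetical, not part of the test suite):

# Hypothetical helper mirroring the math in create_required_buckets: convert
# free bytes to MB, subtract each active service quota, keep a threshold in
# reserve for the OS.
def kv_quota_mb(memory_free_bytes, service_quotas_mb, threshold_mb=1024):
    available_mb = memory_free_bytes / 1024 ** 2
    return available_mb - sum(service_quotas_mb) - threshold_mb

# e.g. 16 GiB free with the index and fts services holding 512 MB each:
print(kv_quota_mb(16 * 1024 ** 3, [512, 512]))  # -> 14336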
Example #2
    def test_partial_rollback(self):
        kv_node = self.get_nodes_from_services_map(service_type="kv",
                                                   get_all_nodes=True)
        log.info("kv nodes:{0}".format(kv_node))
        for node in kv_node:
            mem_client = MemcachedClientHelper.direct_client(
                node, self.src_bucket_name)
            mem_client.stop_persistence()
        body = self.create_save_function_body(self.function_name,
                                              self.handler_code,
                                              worker_count=3)
        try:
            task = self.cluster.async_load_gen_docs(
                self.master,
                self.src_bucket_name,
                self.gens_load,
                self.buckets[0].kvs[1],
                'create',
                compression=self.sdk_compression)
        except Exception as e:
            log.info("error while loading data")
        self.deploy_function(body, wait_for_bootstrap=False)
        # Kill memcached on Node A
        self.log.info("Killing memcached on {0}".format(kv_node[1]))
        shell = RemoteMachineShellConnection(kv_node[1])
        shell.kill_memcached()

        # Start persistence on Node B
        self.log.info("Starting persistence on {0}".format(kv_node[0]))
        mem_client = MemcachedClientHelper.direct_client(
            kv_node[0], self.src_bucket_name)
        mem_client.start_persistence()
        # Wait for bootstrap to complete
        self.wait_for_bootstrap_to_complete(body['appname'])
        stats_src = RestConnection(
            self.master).get_bucket_stats(bucket=self.src_bucket_name)
        log.info(stats_src)
        self.verify_eventing_results(self.function_name,
                                     stats_src["curr_items"],
                                     skip_stats_validation=True)
Example #3
 def test_source_bucket_delete_recreate(self):
     self.mutate_and_checkpoint(n=2)
     self.cluster.bucket_delete(self.src_master, 'default')
     self._create_buckets(self.src_nodes)
     dest_cluster_name = self._get_cluster_names()[1]
     RestConnection(self.src_master).start_replication(
         XDCRConstants.REPLICATION_TYPE_CONTINUOUS, 'default',
         dest_cluster_name, self.rep_type)
     self.sleep(5)
     self.key_counter = 0
     self.keys_loaded = []
     if self.was_pre_rep_successful():
         self.log.info(
             "_pre_replicate following the source bucket recreate was successful: {}"
             .format(self.num_successful_prereps_so_far))
         self.verify_next_checkpoint_passes()
     else:
         self.fail(
             "ERROR: _pre_replicate following source bucket recreate was unsuccessful"
         )
     self.sleep(10)
     self.verify_revid()
Example #4
 def verify_eventing_results_of_all_functions(self,
                                              docs_expected,
                                              verify_results=True,
                                              timeout=600):
     if verify_results:
         # Verify the results of all the buckets
         self.verify_eventing_results(self.function_name,
                                      docs_expected,
                                      skip_stats_validation=True,
                                      timeout=timeout)
         self.verify_eventing_results(self.function_name,
                                      docs_expected,
                                      skip_stats_validation=True,
                                      bucket=self.dst_bucket_name1,
                                      timeout=timeout)
         self.verify_eventing_results(self.function_name,
                                      docs_expected,
                                      skip_stats_validation=True,
                                      bucket=self.dst_bucket_name2,
                                      timeout=timeout)
     else:
         # Just print the stats after sleeping for 10 mins. Required to get the latest stats.
         self.sleep(timeout)
         eventing_nodes = self.get_nodes_from_services_map(
             service_type="eventing", get_all_nodes=True)
         for eventing_node in eventing_nodes:
             rest_conn = RestConnection(eventing_node)
             out = rest_conn.get_all_eventing_stats()
             log.info("Stats for Node {0} is \n{1} ".format(
                 eventing_node.ip, json.dumps(out, sort_keys=True,
                                              indent=4)))
         for bucket in [
                 self.dst_bucket_name, self.dst_bucket_name1,
                 self.dst_bucket_name2
         ]:
             stats_dst = self.rest.get_bucket_stats(bucket)
             log.info(
                 "Number of docs in {0} bucket actual : {1} expected : {2} "
                 .format(bucket, stats_dst["curr_items"], docs_expected))
Example #5
    def test_partial_rollback_pause_resume(self):
        kv_node = self.get_nodes_from_services_map(service_type="kv", get_all_nodes=True)
        log.info("kv nodes:{0}".format(kv_node))
        for node in kv_node:
            mem_client = MemcachedClientHelper.direct_client(node, self.src_bucket_name)
            mem_client.stop_persistence()
        body = self.create_save_function_body(self.function_name, self.handler_code,
                                              worker_count=3)
        if self.is_curl:
            body['depcfg']['curl'] = []
            body['depcfg']['curl'].append({"hostname": self.hostname, "value": "server", "auth_type": self.auth_type,
                                           "username": self.curl_username, "password": self.curl_password,"cookies": self.cookies})
        try:
            task = self.cluster.async_load_gen_docs(self.master, self.src_bucket_name, self.gens_load,
                                                    self.buckets[0].kvs[1], 'create', compression=self.sdk_compression)
        except Exception as e:
            log.info("error while loading data")
        self.deploy_function(body)
        if self.pause_resume:
            self.pause_function(body)
        # Kill memcached on Node A
        self.log.info("Killing memcached on {0}".format(kv_node[1]))
        shell = RemoteMachineShellConnection(kv_node[1])
        shell.kill_memcached()

        # Start persistence on Node B
        self.log.info("Starting persistence on {0}".
                      format(kv_node[0]))
        mem_client = MemcachedClientHelper.direct_client(kv_node[0],
                                                         self.src_bucket_name)
        mem_client.start_persistence()
        if self.pause_resume:
            self.wait_for_handler_state(body['appname'], "paused")
            self.resume_function(body)
        else:
            self.wait_for_handler_state(body['appname'], "deployed")
        stats_src = RestConnection(self.master).get_bucket_stats(bucket=self.src_bucket_name)
        log.info(stats_src)
        self.verify_eventing_results(self.function_name, stats_src["curr_items"], skip_stats_validation=True)
Example #6
 def setUp(self):
     super(EventingUpgrade, self).setUp()
     self.rest = RestConnection(self.master)
     self.server = self.master
     self.queue = Queue.Queue()
     self.src_bucket_name = self.input.param('src_bucket_name',
                                             'src_bucket')
     self.eventing_log_level = self.input.param('eventing_log_level',
                                                'INFO')
     self.dst_bucket_name = self.input.param('dst_bucket_name',
                                             'dst_bucket')
     self.dst_bucket_name1 = self.input.param('dst_bucket_name1',
                                              'dst_bucket1')
     self.dst_bucket_curl = self.input.param('dst_bucket_curl',
                                             'dst_bucket_curl')
     self.source_bucket_mutation = self.input.param(
         'source_bucket_mutation', 'source_bucket_mutation')
     self.metadata_bucket_name = self.input.param('metadata_bucket_name',
                                                  'metadata')
     self.n1ql_op_dst = self.input.param('n1ql_op_dst', 'n1ql_op_dst')
     self.gens_load = self.generate_docs(self.docs_per_day)
     self.upgrade_version = self.input.param("upgrade_version")
Example #7
    def indexing_throughput_stats(self, interval=15):
        self._task['indexer_info'] = list()
        indexers = defaultdict(dict)
        rests = [RestConnection(node) for node in self.nodes]
        while not self._aborted():
            time.sleep(interval)  # 15 seconds by default

            # Grab indexer tasks from all nodes
            tasks = list()
            for rest in rests:
                try:
                    active_tasks = rest.active_tasks()
                except ServerUnavailableException as error:
                    log.error(error)
                    continue
                indexer_tasks = filter(lambda t: t['type'] == 'indexer',
                                       active_tasks)
                tasks.extend(indexer_tasks)

            # Calculate throughput for every unique PID
            thr = 0
            for task in tasks:
                uiid = task['pid'] + str(task['started_on'])

                changes_delta = \
                    task['changes_done'] - indexers[uiid].get('changes_done', 0)
                time_delta = \
                    task['updated_on'] - indexers[uiid].get('updated_on',
                                                            task['started_on'])
                if time_delta:
                    thr += changes_delta / time_delta
                indexers[uiid]['changes_done'] = task['changes_done']
                indexers[uiid]['updated_on'] = task['updated_on']

            # Total throughput summed across all indexer tasks
            self._task['indexer_info'].append({
                'indexing_throughput': thr,
                'timestamp': time.time()
            })
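The heart of the loop above is delta bookkeeping keyed by a per-run task id. A standalone sketch of just that step, using the same field names as the active_tasks entries (the sample values are made up):

from collections import defaultdict

indexers = defaultdict(dict)

def task_throughput(task, indexers):
    # One key per indexer run; a restarted PID gets a fresh started_on.
    uiid = task['pid'] + str(task['started_on'])
    changes_delta = task['changes_done'] - indexers[uiid].get('changes_done', 0)
    time_delta = task['updated_on'] - indexers[uiid].get('updated_on',
                                                         task['started_on'])
    indexers[uiid]['changes_done'] = task['changes_done']
    indexers[uiid]['updated_on'] = task['updated_on']
    return changes_delta / time_delta if time_delta else 0

sample = {'pid': '4321', 'started_on': 0, 'updated_on': 10, 'changes_done': 500}
print(task_throughput(sample, indexers))  # 500 changes over 10 s -> 50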
Example #8
    def start_replication(self,
                          master,
                          slave,
                          replication_type='continuous',
                          buckets=None,
                          bidir=False,
                          suffix='A'):
        """Add remote cluster and start replication"""

        master_rest_conn = RestConnection(master)
        remote_reference = 'remote_cluster_' + suffix

        master_rest_conn.add_remote_cluster(slave.ip, slave.port,
                                            slave.rest_username,
                                            slave.rest_password,
                                            remote_reference)

        if not buckets:
            buckets = self.get_buckets()
        else:
            buckets = self.get_buckets(reversed=True)

        for bucket in buckets:
            master_rest_conn.start_replication(replication_type, bucket,
                                               remote_reference)

        if self.parami('xdcr_num_buckets', 1) > 1 and suffix == 'A':
            self.start_replication(slave,
                                   master,
                                   replication_type,
                                   buckets,
                                   suffix='B')

        if bidir:
            self.start_replication(slave,
                                   master,
                                   replication_type,
                                   buckets,
                                   suffix='B')
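Typical calls, assuming master and slave are server objects carrying REST credentials (a hedged usage sketch; when buckets is omitted the method falls back to self.get_buckets()):

# One-way replication of every bucket from master to slave:
self.start_replication(master, slave)

# Two-way: the recursive call repeats the setup from slave to master under
# the 'remote_cluster_B' reference.
self.start_replication(master, slave, bidir=True)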
Example #9
 def setUp(self):
     if self._testMethodDoc:
         log.info("\n\nStarting Test: %s \n%s" %
                  (self._testMethodName, self._testMethodDoc))
     else:
         log.info("\n\nStarting Test: %s" % (self._testMethodName))
     self.input = TestInputSingleton.input
     self.input.test_params.update({"default_bucket": False})
     super(EventingBaseTest, self).setUp()
     self.master = self.servers[0]
     self.server = self.master
     self.restServer = self.get_nodes_from_services_map(
         service_type="eventing")
     self.rest = RestConnection(self.restServer)
     self.log.info(
         "Setting the min possible memory quota so that adding mode nodes to the cluster wouldn't be a problem."
     )
     self.rest.set_service_memoryQuota(service='memoryQuota',
                                       memoryQuota=330)
     self.rest.set_service_memoryQuota(service='indexMemoryQuota',
                                       memoryQuota=INDEX_QUOTA)
     # self.rest.set_service_memoryQuota(service='eventingMemoryQuota', memoryQuota=EVENTING_QUOTA)
     self.src_bucket_name = self.input.param('src_bucket_name',
                                             'src_bucket')
     self.eventing_log_level = self.input.param('eventing_log_level',
                                                'TRACE')
     self.dst_bucket_name = self.input.param('dst_bucket_name',
                                             'dst_bucket')
     self.dst_bucket_name1 = self.input.param('dst_bucket_name1',
                                              'dst_bucket1')
     self.metadata_bucket_name = self.input.param('metadata_bucket_name',
                                                  'metadata')
     self.create_functions_buckets = self.input.param(
         'create_functions_buckets', True)
     self.docs_per_day = self.input.param("doc-per-day", 1)
     self.use_memory_manager = self.input.param('use_memory_manager', True)
      random.seed(datetime.datetime.now())
     self.function_name = "Function_{0}_{1}".format(
         random.randint(1, 1000000000), self._testMethodName)
Example #10
 def test_maxttl_greater_doc_expiry(self):
     """
      maxTTL is set to 200s in this test;
      docs are created with a shorter TTL (maxTTL - 100).
     :return:
     """
     for bucket in self.buckets:
         self._load_json(bucket, self.num_items, exp=int(self.maxttl)-100)
     self.sleep(int(self.maxttl-100), "waiting for all docs to expire per maxTTL rule...")
     self.expire_pager(self.servers)
     self.sleep(20, "waiting for item count to come down...")
     for bucket in self.buckets:
         items = RestConnection(self.master).get_active_key_count(bucket)
         self.log.info("Doc expiry set to = {0}s, maxTTL = {1}s, after {2}s, item count = {3}".format(
             int(self.maxttl) - 100,
             self.maxttl-100,
             self.maxttl-100,
             items))
         if items == 0:
             self.log.info("SUCCESS: Docs with lesser expiry deleted")
         else:
             self.fail("FAIL: Doc with lesser expiry still present past ttl")
Example #11
 def test_source_bucket_delete_recreate(self):
     self.mutate_and_checkpoint(n=2)
     self.src_cluster.delete_bucket('default')
     self.sleep(60)
     self.create_buckets_on_cluster(self.src_cluster.get_name())
     RestConnection(self.src_master).start_replication(
         REPLICATION_TYPE.CONTINUOUS, 'default', "remote_cluster_%s-%s" %
         (self.src_cluster.get_name(), self.dest_cluster.get_name()))
     self.src_cluster.set_global_checkpt_interval(60)
     self.key_counter = 0
     self.keys_loaded = []
     if self.was_pre_rep_successful():
         self.log.info(
             "_pre_replicate following the source bucket recreate was successful: {0}"
             .format(self.num_successful_prereps_so_far))
         self.verify_next_checkpoint_passes()
     else:
         self.fail(
             "ERROR: _pre_replicate following source bucket recreate was unsuccessful"
         )
     self.sleep(10)
     self.verify_revid()
Example #12
 def set_backup_node(self, backup_node):
     self.backup_rest = RestConnection(backup_node)
     self.backup_node = backup_node
     self.backup_api = \
         self.backup_rest.index_baseUrl + "api/v1/bucket/{0}/backup{1}"
     self.is_backup_exists = False
     self.backup_data = {}
     shell = RemoteMachineShellConnection(backup_node)
     info = shell.extract_remote_info().type.lower()
     if info == 'linux':
         self.cli_command_location = testconstants.LINUX_COUCHBASE_BIN_PATH
         self.backup_path = testconstants.LINUX_BACKUP_PATH
     elif info == 'windows':
         self.cmd_ext = ".exe"
         self.cli_command_location = \
             testconstants.WIN_COUCHBASE_BIN_PATH_RAW
         self.backup_path = testconstants.WIN_BACKUP_C_PATH
     elif info == 'mac':
         self.cli_command_location = testconstants.MAC_COUCHBASE_BIN_PATH
         self.backup_path = testconstants.LINUX_BACKUP_PATH
     else:
         raise Exception("OS not supported.")
Example #13
    def create_required_buckets(self):
        self.log.info("Get the available memory quota")
        bucket_util = bucket_utils(self.master)
        self.info = bucket_util.rest.get_nodes_self()
        threshold_memory = 1024
        total_memory_in_mb = self.info.memoryFree / 1024**2
        total_available_memory_in_mb = total_memory_in_mb
        active_service = self.info.services

        if "index" in active_service:
            total_available_memory_in_mb -= self.info.indexMemoryQuota
        if "fts" in active_service:
            total_available_memory_in_mb -= self.info.ftsMemoryQuota
        if "cbas" in active_service:
            total_available_memory_in_mb -= self.info.cbasMemoryQuota
        if "eventing" in active_service:
            total_available_memory_in_mb -= self.info.eventingMemoryQuota

        self.log.info("Total free memory in MB: {0}".format(total_memory_in_mb))
        available_memory = total_available_memory_in_mb - threshold_memory
        self.rest.set_service_memoryQuota(service='memoryQuota',
                                          memoryQuota=available_memory)
        self.rest.set_service_memoryQuota(service='cbasMemoryQuota',
                                          memoryQuota=available_memory - 1024)
        self.rest.set_service_memoryQuota(service='indexMemoryQuota',
                                          memoryQuota=available_memory - 1024)

        self.log.info("Create CB buckets")

        self.create_bucket(self.master,
                           "GleambookUsers",
                           bucket_ram=available_memory,
                           replica=0)
        self.sleep(30, "wait for bucket warmup to complete.")
        result = RestConnection(self.query_node).query_tool(
            "CREATE PRIMARY INDEX idx_GleambookUsers ON GleambookUsers;")
        self.sleep(10, "wait for index creation.")
        self.assertTrue(result['status'] == "success")
Example #14
 def check_eventing_logs_for_panic(self):
     self.generate_map_nodes_out_dist()
     panic_str = "panic"
     eventing_nodes = self.get_nodes_from_services_map(service_type="eventing", get_all_nodes=True)
     if not eventing_nodes:
         return None
     for eventing_node in eventing_nodes:
         shell = RemoteMachineShellConnection(eventing_node)
         _, dir_name = RestConnection(eventing_node).diag_eval(
             'filename:absname(element(2, application:get_env(ns_server,error_logger_mf_dir))).')
         eventing_log = str(dir_name) + '/eventing.log*'
         count, err = shell.execute_command("zgrep \"{0}\" {1} | wc -l".
                                            format(panic_str, eventing_log))
         if isinstance(count, list):
             count = int(count[0])
         else:
             count = int(count)
         if count > self.panic_count:
             log.info("===== PANIC OBSERVED IN EVENTING LOGS ON SERVER {0}=====".format(eventing_node.ip))
             panic_trace, _ = shell.execute_command("zgrep \"{0}\" {1}".
                                                    format(panic_str, eventing_log))
             log.info("\n {0}".format(panic_trace))
             self.panic_count = count
         os_info = shell.extract_remote_info()
         if os_info.type.lower() == "windows":
             # This is a fixed path in all windows systems inside couchbase
             dir_name_crash = 'c://CrashDumps'
         else:
             dir_name_crash = str(dir_name) + '/../crash/'
         core_dump_count, err = shell.execute_command("ls {0}| wc -l".format(dir_name_crash))
         if isinstance(core_dump_count, list):
             core_dump_count = int(core_dump_count[0])
         else:
             core_dump_count = int(core_dump_count)
         if core_dump_count > 0:
             log.info("===== CORE DUMPS SEEN ON EVENTING NODES, SERVER {0} : {1} crashes seen =====".format(
                      eventing_node.ip, core_dump_count))
         shell.disconnect()
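The list-or-scalar normalization of execute_command output appears twice in this method; a small hypothetical helper that captures it:

def shell_output_as_int(output):
    # execute_command may return a list of output lines or a bare string;
    # either way, the first value holds the count we want.
    return int(output[0]) if isinstance(output, list) else int(output)

print(shell_output_as_int(['3']))  # -> 3
print(shell_output_as_int('3'))    # -> 3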
Example #15
 def test_role_permission_noaccess_bucket(self):
     rest = RestConnection(self.master)
     rest.create_bucket(bucket='default', ramQuotaMB=100)
     # rest1=RestConnection(self.master)
     # rest1.create_bucket(bucket='default1', ramQuotaMB=100,proxyPort=11212)
     bucket_name = self.bucket_name.split(":")
     for server in self.servers[:self.nodes_init]:
          if len(bucket_name) > 1:
             for bucket in bucket_name:
                 rbacmain(server, self.auth_type
                          )._check_role_permission_validate_multiple(
                              self.user_id, self.user_role, bucket,
                              self.role_map, self.incorrect_bucket)
         else:
             rbacmain(
                 server,
                 self.auth_type)._check_role_permission_validate_multiple(
                     self.user_id,
                     self.user_role,
                     self.bucket_name,
                     self.role_map,
                     no_bucket_access=self.no_bucket_access,
                     no_access_bucket_name=self.no_access_bucket_name)
Example #16
 def test_incorrect_plain_passphrase_fails(self):
     """
     Verify that an incorrect passphrase given during reload of an encrypted
     node pkey fails
     """
     self.x509.generate_multiple_x509_certs(servers=self.servers)
     self.x509.upload_root_certs(server=self.master)
     for server in self.servers[:self.nodes_init]:
         self.x509.copy_node_cert(server=server)
     params = dict()
     params["privateKeyPassphrase"] = dict()
     params["privateKeyPassphrase"]["type"] = "plain"
     params["privateKeyPassphrase"]["password"] = \
         self.x509.private_key_passphrase_map[str(self.master.ip)] + "incorrect"
     params = json.dumps(params)
     rest = RestConnection(self.master)
     status, content = rest.reload_certificate(params=params)
     if not status:
         self.log.info(
             "Incorrect plain passphrase failed as expected {0}".format(
                 content))
     else:
         self.fail("incorrect plain passphrase worked")
Example #17
    def customize_xdcr_settings(self):
        """Set custom XDCR environment variables"""
        max_concurrent_reps_per_doc = self.param('max_concurrent_reps_per_doc', None)
        xdcr_doc_batch_size_kb = self.param('xdcr_doc_batch_size_kb', None)
        xdcr_checkpoint_interval = self.param('xdcr_checkpoint_interval', None)
        xdcr_latency_optimization = self.param('xdcr_latency_optimization', None)

        if max_concurrent_reps_per_doc:
            param = 'xdcrMaxConcurrentReps'
            value = max_concurrent_reps_per_doc
        elif xdcr_doc_batch_size_kb:
            param = 'xdcrDocBatchSizeKb'
            value = xdcr_doc_batch_size_kb
        elif xdcr_checkpoint_interval:
            param = 'xdcrCheckpointInterval'
            value = xdcr_checkpoint_interval
        else:
            return

        self.log.info("changing {0} to {1}".format(param, value))

        for servers in self.input.clusters.values():
            RestConnection(servers[0]).set_internalSetting(param, value)
Example #18
 def tearDown(self):
     log.info("Starting tearDown")
     # catch panics and print it in the test log
     self.check_eventing_logs_for_panic()
     rest = RestConnection(self.master)
     buckets = rest.get_buckets()
     for bucket in buckets:
         stats = rest.get_bucket_stats(bucket)
         self.log.info("Bucket {} DGM is {}".format(
             bucket, stats["vb_active_resident_items_ratio"]))
     self.hostname = self.input.param('host', 'https://postman-echo.com/')
     if self.hostname == 'local':
         self.teardown_curl()
      # check that the metadata bucket is empty
     if len(buckets) > 0 and not self.skip_metabucket_check:
         stats_meta = rest.get_bucket_stats("metadata")
         self.log.info("number of documents in metadata bucket {}".format(
             stats_meta["curr_items"]))
         if stats_meta["curr_items"] != 0:
             raise Exception(
                 "metdata bucket is not empty at the end of test")
     super(EventingBaseTest, self).tearDown()
     log.info("Completed tearDown")
Example #19
    def execute_statement_on_cbas_via_rest(self,
                                           statement,
                                           mode=None,
                                           rest=None,
                                           timeout=120,
                                           client_context_id=None):
        """
        Executes a statement on CBAS via the REST API client.
        """
        pretty = "true"
        if not rest:
            rest = RestConnection(self.master)
        try:
            response = rest.execute_statement_on_cbas(statement, mode, pretty,
                                                      timeout,
                                                      client_context_id)
            response = json.loads(response)
            if "errors" in response:
                errors = response["errors"]
            else:
                errors = None

            if "results" in response:
                results = response["results"]
            else:
                results = None

            if "handle" in response:
                handle = response["handle"]
            else:
                handle = None

            return response["status"], response[
                "metrics"], errors, results, handle

        except Exception as e:
            raise Exception(str(e))
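Callers unpack the five return values in order. A hedged usage sketch (the statement and assertion are illustrative, not taken from the suite):

status, metrics, errors, results, handle = \
    self.execute_statement_on_cbas_via_rest(
        "SELECT VALUE COUNT(*) FROM GleambookUsers;")
if status != "success":
    self.fail("CBAS statement failed with errors: {0}".format(errors))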
Example #20
    def test_maxttl_with_doc_updates(self):
        """
        1. Create a bucket with ttl = 60s
        2. Upload 1000 docs with exp = 40s
        3. After 20s, Update docs with exp = 60s
        4. After 40s, run expiry pager again and get item count, must be 1000
        5. After 20s, run expiry pager again and get item count, must be 0
        """
        rest = RestConnection(self.master)
        for bucket in self.buckets:
            self._load_json(bucket, self.num_items, exp=40)

        self.sleep(20, "waiting to update docs with exp=60s...")

        for bucket in self.buckets:
            self._load_json(bucket, self.num_items, exp=60)

        self.sleep(40, "waiting before running expiry pager...")
        self.expire_pager(self.servers)
        self.sleep(20, "waiting for item count to come down...")
        for bucket in self.buckets:
            items = rest.get_active_key_count(bucket)
            self.log.info("Items: {0}".format(items))
            if items != self.num_items:
                self.fail(
                    "FAIL: Docs with updated expiry deleted unexpectedly!")

        self.sleep(20, "waiting before running expiry pager...")
        self.expire_pager(self.servers)
        self.sleep(20, "waiting for item count to come down...")
        for bucket in self.buckets:
            items = rest.get_active_key_count(bucket)
            self.log.info("Items: {0}".format(items))
            if items != 0:
                self.fail(
                    "FAIL: Docs with updated expiry not deleted after new exp has elapsed!"
                )
Example #21
 def test_restart_node_with_encrypted_pkeys(self):
     """
     1. Init node cluster, with encrypted node pkeys
     2. Restart a node
     3. Failover and delta recover that node
     4. Restart the node again and rebalance-out this time
      5. Repeat steps 2 to 4 until only the master node is left
     """
     self.x509.generate_multiple_x509_certs(
         servers=self.servers[:self.nodes_init])
     self.x509.upload_root_certs(self.master)
     self.x509.upload_node_certs(servers=self.servers[:self.nodes_init])
     rest = RestConnection(self.master)
     nodes_in_cluster = [node for node in self.servers[:self.nodes_init]]
     for node in self.servers[1:self.nodes_init]:
         shell = RemoteMachineShellConnection(node)
         shell.restart_couchbase()
         shell.disconnect()
         self.sleep(10, "Wait after restart")
         self.cluster.async_failover(nodes_in_cluster, [node],
                                     graceful=False)
         self.wait_for_failover_or_assert(1)
         rest.set_recovery_type("ns_1@" + node.ip, recoveryType="delta")
         https_val = CbServer.use_https  # so that add_node uses https
         CbServer.use_https = True
         task = self.cluster.async_rebalance(nodes_in_cluster, [], [])
         CbServer.use_https = https_val
         self.wait_for_rebalance_to_complete(task)
         shell = RemoteMachineShellConnection(node)
         shell.restart_couchbase()
         shell.disconnect()
         https_val = CbServer.use_https  # so that add_node uses https
         CbServer.use_https = True
         task = self.cluster.async_rebalance(nodes_in_cluster, [], [node])
         self.wait_for_rebalance_to_complete(task)
         CbServer.use_https = https_val
         nodes_in_cluster.remove(node)
Example #22
 def test_fts_log_redaction(self):
     gen_create = BlobGenerator('logredac',
                                'logredac-',
                                self.value_size,
                                end=self.num_items)
     self._load_all_buckets(self.master, gen_create, "create", 0)
     index_definition = {
         "type": "fulltext-index",
         "name": "index1",
         "sourceType": "couchbase",
         "sourceName": "default"
     }
     rest = RestConnection(self.master)
     status = rest.create_fts_index("index1", index_definition)
     if status:
         log.info("Index 'index1' created")
     else:
         log.info("Error creating index, status = {0}".format(status))
     self.sleep(60, "waiting for docs to get indexed")
     query_json = {"query": {"field": "type", "match": "emp"}}
     hits, _, _, _ = rest.run_fts_query(index_name="index1",
                                        query_json=query_json)
     log.info("Hits from query {0}: {1}".format(query_json, hits))
     self.set_redaction_level()
     self.start_logs_collection()
     result = self.monitor_logs_collection()
     logs_path = result["perNode"]["[email protected]"]["path"]
     redactFileName = logs_path.split('/')[-1]
     nonredactFileName = logs_path.split('/')[-1].replace('-redacted', '')
     remotepath = logs_path[0:logs_path.rfind('/') + 1]
     self.verify_log_files_exist(remotepath=remotepath,
                                 redactFileName=redactFileName,
                                 nonredactFileName=nonredactFileName)
     self.verify_log_redaction(remotepath=remotepath,
                               redactFileName=redactFileName,
                               nonredactFileName=nonredactFileName,
                               logFileName="ns_server.fts.log")
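The file-name handling above is plain string slicing; a standalone sketch with a made-up collectinfo path shows what each piece yields:

logs_path = "/tmp/collectinfo-2024-01-01T000000-redacted.zip"  # hypothetical path
redactFileName = logs_path.split('/')[-1]                    # collectinfo-2024-01-01T000000-redacted.zip
nonredactFileName = redactFileName.replace('-redacted', '')  # collectinfo-2024-01-01T000000.zip
remotepath = logs_path[0:logs_path.rfind('/') + 1]           # /tmp/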
Example #23
    def calculate_zones_with_replica(self, index=None):
        zones_list = self.rest.get_all_zones_info()
        zones_with_replica = 0
        for zone in zones_list['groups']:
            replica_found = False
            nodes = zone['nodes']
            for node in nodes:
                if replica_found:
                    break
                if 'fts' in node['services']:
                    hostname = node['hostname'][0:node['hostname'].find(":")]
                    for fts_node in self._cb_cluster.get_fts_nodes():
                        if fts_node.ip == hostname:
                            rest_client = RestConnection(fts_node)
                            _, num_pindexes = rest_client.get_fts_stats(
                                index_name=index.name,
                                bucket_name=index.source_bucket.name,
                                stat_name="num_pindexes_actual")
                            if num_pindexes > 0:
                                replica_found = True
                                zones_with_replica += 1
                                break

        return zones_with_replica
Example #24
    def add_ldap_user(self, server, user_name="bjones"):
        """
        Add an ldap user to CB cluster with RBAC roles
        """
        roles = '''cluster_admin,ro_admin,
                bucket_admin[travel-sample],bucket_full_access[travel-sample],
                data_backup[travel-sample],'''
        if self.base_version < "7.0":
            roles = roles + '''security_admin,
                            data_reader[travel-sample],
                            data_writer[travel-sample],
                            data_dcp_reader[travel-sample],
                            data_monitoring[travel-sample]'''
        else:
            roles = roles + '''security_admin_local,security_admin_external,
                            data_reader[travel-sample:_default:_default],
                            data_writer[travel-sample:_default:_default],
                            data_dcp_reader[travel-sample:_default:_default],
                            data_monitoring[travel-sample:_default:_default]'''

        payload = "name={0}&roles={1}".format(user_name, roles)
        log.info("User name -- {0} :: Roles -- {1}".format(user_name, roles))
        rest = RestConnection(server)
        rest.add_external_user(user_name, payload)
Example #25
    def test_volume(self):
        nodes_in_cluster = [self.servers[0]]
        print "Start Time: %s" % str(
            time.strftime("%H:%M:%S", time.gmtime(time.time())))

        ########################################################################################################################
        self.log.info("Add a N1QL/Index nodes")
        self.query_node = self.servers[1]
        rest = RestConnection(self.query_node)
        rest.set_data_path(data_path=self.query_node.data_path,
                           index_path=self.query_node.index_path,
                           cbas_path=self.query_node.cbas_path)
        result = self.add_node(self.query_node, rebalance=False)
        self.assertTrue(result, msg="Failed to add N1QL/Index node.")

        self.log.info("Add a KV nodes")
        result = self.add_node(self.servers[2],
                               services=["kv"],
                               rebalance=True)
        self.assertTrue(result, msg="Failed to add KV node.")

        nodes_in_cluster = nodes_in_cluster + [
            self.servers[1], self.servers[2]
        ]
        ########################################################################################################################
        self.log.info("Step 2: Create Couchbase buckets.")
        self.create_required_buckets()
        for node in nodes_in_cluster:
            NodeHelper.do_a_warm_up(node)
            NodeHelper.wait_service_started(node)
        ########################################################################################################################
        self.log.info(
            "Step 3: Create 10M docs average of 1k docs for 8 couchbase buckets."
        )
        env = DefaultCouchbaseEnvironment.builder().mutationTokensEnabled(
            True).computationPoolSize(5).socketConnectTimeout(
                100000).connectTimeout(100000).maxRequestLifetime(
                    TimeUnit.SECONDS.toMillis(300)).build()
        cluster = CouchbaseCluster.create(env, self.master.ip)
        cluster.authenticate("Administrator", "password")
        bucket = cluster.openBucket("GleambookUsers")

        pool = Executors.newFixedThreadPool(5)
        items_start_from = 0
        total_num_items = self.input.param("num_items", 5000)

        executors = []
        num_executors = 5
        doc_executors = 5
        num_items = total_num_items / num_executors
        for i in xrange(doc_executors):
            executors.append(
                GleambookUser_Docloader(bucket,
                                        num_items,
                                        items_start_from + i * num_items,
                                        batch_size=2000))
        futures = pool.invokeAll(executors)
        for future in futures:
            print(future.get(num_executors, TimeUnit.SECONDS))
        print("Executors completed!!")
        shutdown_and_await_termination(pool, num_executors)

        updates_from = items_start_from
        deletes_from = items_start_from + total_num_items / 10
        items_start_from += total_num_items
        ########################################################################################################################
        self.sleep(120, "Sleeping after 1st cycle.")
        self.log.info("Step 8: Delete 1M docs. Update 1M docs.")
        pool = Executors.newFixedThreadPool(5)
        num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 4

        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, updates_from,
                                    "update"))
        executors.append(
            GleambookUser_Docloader(bucket, num_items / 10, deletes_from,
                                    "delete"))
        futures = pool.invokeAll(executors)
        for future in futures:
            print(future.get(num_executors, TimeUnit.SECONDS))
        print("Executors completed!!")
        shutdown_and_await_termination(pool, num_executors)

        ########################################################################################################################
        self.sleep(120, "Sleeping after 2nd cycle.")
        pool = Executors.newFixedThreadPool(5)
        num_items = self.input.param("num_items", 5000)
        executors = []
        num_executors = 5
        doc_executors = 5
        num_items = total_num_items / doc_executors

        for i in xrange(doc_executors):
            executors.append(
                GleambookUser_Docloader(bucket,
                                        num_items,
                                        items_start_from + i * num_items,
                                        batch_size=2000))
        rebalance = self.cluster.async_rebalance(nodes_in_cluster,
                                                 [self.servers[3]], [])
        futures = pool.invokeAll(executors)

        for future in futures:
            print(future.get(num_executors, TimeUnit.SECONDS))
        print("Executors completed!!")
        shutdown_and_await_termination(pool, num_executors)
        rebalance.get_result()
        reached = RestHelper(self.rest).rebalance_reached(wait_step=120)
        self.assertTrue(reached, "rebalance failed, stuck or did not complete")

        bucket.close()
        cluster.disconnect()

        print "End Time: %s" % str(
            time.strftime("%H:%M:%S", time.gmtime(time.time())))
Example #26
 def setUp(self, add_defualt_cbas_node=True):
     self.input = TestInputSingleton.input
     self.input.test_params.update({"default_bucket": False})
     BaseTestCase.setUp(self)
     self.rest = RestConnection(self.master)
Example #27
 def run_n1ql_query(self, query="", node=None):
     if not node:
         node = self._find_n1ql_node()
     res = RestConnection(node).query_tool(query)
     return res
Example #28
 def _get_services_map(self):
     rest = RestConnection(self.nodes[0])
     return rest.get_nodes_services()
Example #29
 def cleanup_eventing(self):
     ev_node = self.get_nodes_from_services_map(service_type="eventing",
                                                get_all_nodes=False)
     ev_rest = RestConnection(ev_node)
     log.info("Running eventing cleanup api...")
     ev_rest.cleanup_eventing()
Example #30
 def refresh_rest_server(self):
     eventing_nodes_list = self.get_nodes_from_services_map(
         service_type="eventing", get_all_nodes=True)
     self.restServer = eventing_nodes_list[0]
     self.rest = RestConnection(self.restServer)
     return len(eventing_nodes_list)