Example #1
 def print_eventing_stats_from_all_eventing_nodes(self):
     eventing_nodes = self.get_nodes_from_services_map(service_type="eventing", get_all_nodes=True)
     for eventing_node in eventing_nodes:
         rest_conn = RestConnection(eventing_node)
         out = rest_conn.get_all_eventing_stats()
         log.info("Stats for Node {0} is \n{1} ".format(eventing_node.ip, json.dumps(out, sort_keys=True,
                                                                                   indent=4)))
 def test_function_where_handler_code_takes_more_time_to_execute_than_execution_timeout(self):
     keys = ['customer123', 'customer1234', 'customer12345']
     url = 'couchbase://{ip}/{name}'.format(ip=self.master.ip, name=self.src_bucket_name)
     bucket = Bucket(url, username="******", password="******")
     for doc_id in keys:
         bucket.upsert(doc_id, {'name' : doc_id})
     # create a function which sleeps for 5 secs and set execution_timeout to 1s
     body = self.create_save_function_body(self.function_name, HANDLER_CODE_ERROR.EXECUTION_TIME_MORE_THAN_TIMEOUT,
                                           execution_timeout=1)
     # deploy the function
     self.deploy_function(body)
     # Intentional wait so that some mutations are attempted before we verify that none were processed
     self.sleep(60)
     # No docs should be present in dst_bucket as all the function executions should have timed out
     self.verify_eventing_results(self.function_name, 0, skip_stats_validation=True)
     eventing_nodes = self.get_nodes_from_services_map(service_type="eventing", get_all_nodes=True)
     exec_timeout_count = 0
     for eventing_node in eventing_nodes:
         rest_conn = RestConnection(eventing_node)
         out = rest_conn.get_all_eventing_stats()
         # get sum of all timeout_count
         exec_timeout_count += out[0]["failure_stats"]["timeout_count"]
     # check that the number of timed-out executions equals the number of docs created
     if exec_timeout_count != len(keys):
         self.fail("Not all event executions timed out : Expected : {0} Actual : {1}".format(len(keys),
                                                                                             exec_timeout_count))
     self.undeploy_and_delete_function(body)
Example #3
    def test_maxttl_with_doc_updates(self):
        """
        1. Create a bucket with ttl = 60s
        2. Upload 1000 docs with exp = 40s
        3. After 20s, Update docs with exp = 60s
        4. After 40s, run expiry pager and get item count, must be 1000
        5. After 20s, run expiry pager again and get item count, must be 0
        """
        rest = RestConnection(self.master)
        for bucket in self.buckets:
            self._load_json(bucket, self.num_items, exp=40)

        self.sleep(20, "waiting to update docs with exp=60s...")

        for bucket in self.buckets:
            self._load_json(bucket, self.num_items, exp=60)

        self.sleep(40, "waiting before running expiry pager...")
        self.expire_pager(self.servers)
        for bucket in self.buckets:
            items = rest.get_active_key_count(bucket)
            self.log.info("Items: {0}".format(items))
            if items != self.num_items:
                self.fail("FAIL: Docs with updated expiry deleted unexpectedly!")

        self.sleep(20, "waiting before running expiry pager...")
        self.expire_pager(self.servers)
        self.sleep(20, "waiting for item count to come down...")
        for bucket in self.buckets:
            items = rest.get_active_key_count(bucket)
            self.log.info("Items: {0}".format(items))
            if items != 0:
                self.fail("FAIL: Docs with updated expiry not deleted after new exp has elapsed!")
Example #4
    def customize_xdcr_settings(self):
        """Set custom XDCR environment variables"""
        max_concurrent_reps_per_doc = self.param('max_concurrent_reps_per_doc', None)
        xdcr_doc_batch_size_kb = self.param('xdcr_doc_batch_size_kb', None)
        xdcr_checkpoint_interval = self.param('xdcr_checkpoint_interval', None)
        xdcr_latency_optimization = self.param('xdcr_latency_optimization', None)

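        # Apply at most one override per run; the first non-empty setting wins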
        if max_concurrent_reps_per_doc:
            param = 'xdcrMaxConcurrentReps'
            value = max_concurrent_reps_per_doc
        elif xdcr_doc_batch_size_kb:
            param = 'xdcrDocBatchSizeKb'
            value = xdcr_doc_batch_size_kb
        elif xdcr_checkpoint_interval:
            param = 'xdcrCheckpointInterval'
            value = xdcr_checkpoint_interval
        else:
            return

        self.log.info("changing {0} to {1}".format(param, value))

        for servers in self.input.clusters.values():
            rest_conn = RestConnection(servers[0])
            replications = rest_conn.get_replications()
            for repl in replications:
                src_bucket = repl.get_src_bucket()
                dst_bucket = repl.get_dest_bucket()
                rest_conn.set_xdcr_param(src_bucket.name, dst_bucket.name, param, value)
Example #5
    def start_replication(self, master, slave, replication_type='continuous',
                          buckets=None, bidir=False, suffix='A'):
        """Add remote cluster and start replication"""

        master_rest_conn = RestConnection(master)
        remote_reference = 'remote_cluster_' + suffix

        master_rest_conn.add_remote_cluster(slave.ip, slave.port,
                                            slave.rest_username,
                                            slave.rest_password,
                                            remote_reference)

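        # Use every bucket by default; a caller-supplied list (reverse direction) is re-fetched in reversed order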
        if not buckets:
            buckets = self.get_buckets()
        else:
            buckets = self.get_buckets(reversed=True)

        for bucket in buckets:
            master_rest_conn.start_replication(replication_type, bucket,
                                               remote_reference)

        if self.parami('num_buckets', 1) > 1 and suffix == 'A':
            self.start_replication(slave, master, replication_type, buckets,
                                   suffix='B')

        if bidir:
            self.start_replication(slave, master, replication_type, buckets,
                                   suffix='B')
Example #6
 def get_bucket_size(self, interval=60):
     self._task["bucket_size"] = []
     retries = 0
     nodes_iterator = (node for node in self.nodes)
     node = next(nodes_iterator)
     rest = RestConnection(node)
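     # Poll the bucket's disk size until aborted, moving to the next node after repeated failures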
     while not self._aborted():
         time.sleep(interval)
         log.info("collecting bucket size stats")
         try:
             status, db_size = rest.get_database_disk_size(self.bucket)
             if status:
                 self._task["bucket_size"].append(db_size)
         except IndexError as e:
             retries += 1
             log.error("unable to get bucket size {0}: {1}"
                       .format(self.bucket, e))
             log.warning("retries: {0} of {1}".format(retries, RETRIES))
             if retries == RETRIES:
                 try:
                     node = next(nodes_iterator)
                     rest = RestConnection(node)
                     retries = 0
                 except StopIteration:
                     log.error("no nodes available: stop collecting bucket_size")
                     return
Example #7
    def measure_indexing_throughput(self, nodes):
        self._task['indexer_info'] = list()
        indexers = defaultdict(dict)
        while not self._aborted():
            time.sleep(15)  # 15 seconds by default

            # Grab indexer tasks from all nodes
            tasks = list()
            for node in nodes:
                rest = RestConnection(node)
                tasks.extend(filter(lambda t: t['type'] == 'indexer',
                                    rest.active_tasks()))

            # Calculate throughput for every unique PID
            thr = 0
            for task in tasks:
                uiid = task['pid'] + str(task['started_on'])

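                # Throughput for this indexer = changes indexed since the last sample / elapsed time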
                changes_delta = \
                    task['changes_done'] - indexers[uiid].get('changes_done', 0)
                time_delta = \
                    task['updated_on'] - indexers[uiid].get('updated_on',
                                                            task['started_on'])
                if time_delta:
                    thr += changes_delta / time_delta
                indexers[uiid]['changes_done'] = task['changes_done']
                indexers[uiid]['updated_on'] = task['updated_on']

            # Total indexing throughput across all indexer tasks for this sample
            self._task['indexer_info'].append({
                'indexing_throughput': thr,
                'timestamp': time.time()
            })
Example #8
    def get_and_validate_latest_checkpoint(self):
        rest_con = RestConnection(self.src_master)
        try:
            checkpoint_record = rest_con.get_recent_xdcr_vb_ckpt('default')
            self.log.info("Checkpoint record : {}".format(checkpoint_record))
            self.chkpt_records.append(checkpoint_record)
        except Exception as e:
            raise XDCRCheckpointException("Error retrieving last checkpoint document - {}".format(e))

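        # Fields of interest from the checkpoint record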
        failover_uuid = checkpoint_record["failover_uuid"]
        seqno = checkpoint_record["seqno"]

        self.log.info ("Verifying commitopaque/remote failover log ...")
        if seqno != 0:
            self.validate_remote_failover_log(checkpoint_record["target_vb_opaque"]["target_vb_uuid"], checkpoint_record["target_seqno"])
            self.log.info ("Verifying local failover uuid ...")
            local_vb_uuid, _ = self.get_failover_log(self.src_master)
            self.assertTrue((int(failover_uuid) == int(local_vb_uuid)) or
                            (int(failover_uuid) == 0),
                        "local failover_uuid is wrong in checkpoint record! Expected: {0} seen: {1}".
                        format(local_vb_uuid,failover_uuid))
            self.log.info("Checkpoint record verified")
        else:
            self.log.info("Skipping checkpoint record checks for checkpoint-0")
        return True
Example #9
    def ns_server_stats(self, interval=60):
        self._task["ns_server_stats"] = []
        self._task["ns_server_stats_system"] = []
        nodes_iterator = (node for node in self.nodes)
        node = next(nodes_iterator)
        retries = 0
        not_null = lambda v: v if v is not None else 0

        rest = RestConnection(node)
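        # Poll bucket and system stats until aborted, rotating to the next node after repeated parse failures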
        while not self._aborted():
            time.sleep(interval)
            log.info("collecting ns_server_stats")
            try:
                # Bucket stats
                ns_server_stats = rest.fetch_bucket_stats(bucket=self.bucket)
                for key, value in ns_server_stats["op"]["samples"].items():
                    ns_server_stats["op"]["samples"][key] = not_null(value)
                self._task["ns_server_stats"].append(ns_server_stats)
                # System stats
                ns_server_stats_system = rest.fetch_system_stats()
                self._task["ns_server_stats_system"].append(ns_server_stats_system)
            except (ValueError, TypeError) as e:
                retries += 1
                log.error("unable to parse json object {0}: {1}".format(node, e))
                log.warning("retries: {0} of {1}".format(retries, RETRIES))
                if retries == RETRIES:
                    try:
                        node = next(nodes_iterator)
                        rest = RestConnection(node)
                        retries = 0
                    except StopIteration:
                        log.error("no nodes available: stop collecting ns_server_stats")
                        return
    def execute_statement_on_cbas_via_rest(self, statement, mode=None, rest=None, timeout=120, client_context_id=None, username=None, password=None):
        """
        Executes a statement on CBAS using the REST API using REST Client
        """
        pretty = "true"
        if not rest:
            rest = RestConnection(self.cbas_node)
        try:
            self.log.info("Running query on cbas: %s"%statement)
            response = rest.execute_statement_on_cbas(statement, mode, pretty,
                                                      timeout, client_context_id, username, password)
            response = json.loads(response)
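            # Extract optional sections of the CBAS response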
            if "errors" in response:
                errors = response["errors"]
            else:
                errors = None

            if "results" in response:
                results = response["results"]
            else:
                results = None

            if "handle" in response:
                handle = response["handle"]
            else:
                handle = None
            
            return response["status"], response[
                "metrics"], errors, results, handle

        except Exception as e:
            raise Exception(str(e))
Example #11
    def get_and_validate_latest_checkpoint(self):
        rest_con = RestConnection(self.src_master)
        try:
            checkpoint_record = rest_con.get_recent_xdcr_vb_ckpt('default', 'default', '0')
            self.log.info("Checkpoint record : {}".format(checkpoint_record))
            self.chkpt_records.append(checkpoint_record)
        except Exception as e:
            raise XDCRCheckpointException("Error retrieving last checkpoint document - {}".format(e))

        commit_opaque = checkpoint_record["commitopaque"]
        failover_uuid = checkpoint_record["failover_uuid"]

        upr_snap_seqno = checkpoint_record["upr_snapshot_seqno"]
        seqno = checkpoint_record["seqno"]
        start_time = checkpoint_record["start_time"]
        total_docs_checked = checkpoint_record["total_docs_checked"]
        total_docs_written = checkpoint_record["total_docs_written"]
        total_data_repl = checkpoint_record["total_data_replicated"]
        end_time = checkpoint_record["end_time"]

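        # seqno 0 means no checkpoint has been taken yet, so skip the detailed validation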
        self.log.info ("Verifying commitopaque/remote failover log ...")
        if seqno != 0:
            self.validate_remote_failover_log(commit_opaque[0], commit_opaque[1])
            self.log.info ("Verifying last checkpointed seqno ...")
            self.validate_last_checkpointed_seqno(int(seqno))
            self.log.info ("Verifying local failover uuid ...")
            local_vb_uuid, _ = self.get_failover_log(self.src_master)
            self.assertTrue(int(local_vb_uuid) == int(failover_uuid),
                        "local failover_uuid is wrong in checkpoint record! Expected: {0} seen: {1}".
                        format(local_vb_uuid,failover_uuid))
            self.log.info("Checkpoint record verified")
        else:
            self.log.info("Skipping checkpoint record checks for checkpoint-0")
        return True
 def test_function_where_handler_code_takes_more_time_to_execute_than_execution_timeout(self):
     # Note to self: never use SDKs unless you really have to. It is difficult to upgrade or maintain correct
     # SDK versions on the slaves, and scripts become notoriously unreliable when run on Jenkins slaves.
     num_docs = 10
     values = ['1', '10']
     # create 10 non json docs on source bucket
     gen_load_non_json = JSONNonDocGenerator('non_json_docs', values, start=0, end=num_docs)
     self.cluster.load_gen_docs(self.master, self.src_bucket_name, gen_load_non_json, self.buckets[0].kvs[1],
                                'create', compression=self.sdk_compression)
     # create a function which sleeps for 5 secs and set execution_timeout to 1s
     body = self.create_save_function_body(self.function_name, HANDLER_CODE_ERROR.EXECUTION_TIME_MORE_THAN_TIMEOUT,
                                           execution_timeout=1)
     # deploy the function
     self.deploy_function(body)
     # Intentional wait so that some mutations are attempted before we verify that none were processed
     self.sleep(60)
     # No docs should be present in dst_bucket as all the function executions should have timed out
     self.verify_eventing_results(self.function_name, 0, skip_stats_validation=True)
     eventing_nodes = self.get_nodes_from_services_map(service_type="eventing", get_all_nodes=True)
     exec_timeout_count = 0
     for eventing_node in eventing_nodes:
         rest_conn = RestConnection(eventing_node)
         out = rest_conn.get_all_eventing_stats()
         # get sum of all timeout_count
         exec_timeout_count += out[0]["failure_stats"]["timeout_count"]
     # check that the number of timed-out executions equals the number of docs created
     if exec_timeout_count != num_docs:
         self.fail("Not all event executions timed out : Expected : {0} Actual : {1}".format(len(keys),
                                                                                             exec_timeout_count))
     self.undeploy_and_delete_function(body)
    def test_gsi_with_flush_bucket_redaction_enabled(self):
        # load bucket and do some ops
        self.set_indexer_logLevel("trace")
        json_generator = JsonGenerator()
        gen_docs = json_generator.generate_all_type_documents_for_gsi(docs_per_day=self.doc_per_day, start=0)
        full_docs_list = self.generate_full_docs_list(gen_docs)
        n1ql_helper = N1QLHelper(use_rest=True, buckets=self.buckets, full_docs_list=full_docs_list,
                                      log=log, input=self.input, master=self.master)
        self.load(gen_docs)
        n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
        query_definition_generator = SQLDefinitionGenerator()
        query_definitions = query_definition_generator.generate_airlines_data_query_definitions()
        query_definitions = query_definition_generator.filter_by_group("all", query_definitions)
        # set log redaction level, collect logs, verify log files exist and verify them for redaction
        self.set_redaction_level()
        self.start_logs_collection()
        # Create the indexes from the query definitions
        for query_definition in query_definitions:
            for bucket in self.buckets:
                create_query = query_definition.generate_index_create_query(bucket.name)
                n1ql_helper.run_cbq_query(query=create_query, server=n1ql_node)

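        # Run scan queries against the newly built indexes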
        for query_definition in query_definitions:
            for bucket in self.buckets:
                scan_query = query_definition.generate_query(bucket=bucket.name)
                n1ql_helper.run_cbq_query(query=scan_query, server=n1ql_node)

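        # Flush the first bucket and reload it to generate fresh mutations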
        rest = RestConnection(self.master)
        rest.flush_bucket(self.buckets[0].name)

        self.sleep(10)
        self.load(gen_docs, buckets=[self.buckets[0]])

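        # Re-run the scan queries after the flush and reload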
        for query_definition in query_definitions:
            for bucket in self.buckets:
                scan_query = query_definition.generate_query(bucket=bucket.name)
                n1ql_helper.run_cbq_query(query=scan_query, server=n1ql_node)

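        # Drop the indexes created above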
        for query_definition in query_definitions:
            for bucket in self.buckets:
                drop_query = query_definition.generate_index_drop_query(bucket=bucket.name)
                n1ql_helper.run_cbq_query(query=drop_query, server=n1ql_node)
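        # Wait for the log collection task to finish and fetch its per-node results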
        result = self.monitor_logs_collection()
        log.info(result)
        try:
            logs_path = result["perNode"]["ns_1@" + str(self.master.ip)]["path"]
        except KeyError:
            logs_path = result["perNode"]["[email protected]"]["path"]
        redactFileName = logs_path.split('/')[-1]
        nonredactFileName = logs_path.split('/')[-1].replace('-redacted', '')
        remotepath = logs_path[0:logs_path.rfind('/') + 1]
        log_file = self.input.param("log_file_name", "indexer.log")
        self.verify_log_files_exist(remotepath=remotepath,
                                    redactFileName=redactFileName,
                                    nonredactFileName=nonredactFileName)
        self.verify_log_redaction(remotepath=remotepath,
                                  redactFileName=redactFileName,
                                  nonredactFileName=nonredactFileName,
                                  logFileName="ns_server.{0}".format(log_file))
Example #14
 def wait_for_xdc_replication(self):
     rest = RestConnection(self.input.servers[0])
     bucket = self.param('bucket', 'default')
     while True:  # we have to wait at least once
         print "Waiting for XDC replication to finish"
         time.sleep(15)
         if not rest.get_xdc_queue_size(bucket):
             break
Example #15
 def print_go_routine_dump_from_all_eventing_nodes(self):
     eventing_nodes = self.get_nodes_from_services_map(service_type="eventing", get_all_nodes=True)
     for eventing_node in eventing_nodes:
         rest_conn = RestConnection(eventing_node)
         out = rest_conn.get_eventing_go_routine_dumps()
         log.info("Go routine dumps for Node {0} is \n{1} ======================================================"
                  "============================================================================================="
                  "\n\n".format(eventing_node.ip, out))
 def get_stat_successful_checkpoints(self):
     """
     Get num_checkpoints xdcr stat for default replication
     """
     rest = RestConnection(self.src_master)
     repl = rest.get_replication_for_buckets('default', 'default')
     val = rest.fetch_bucket_xdcr_stats()['op']['samples']['replications/'+repl['id']+'/num_checkpoints']
     return int(val[-1])
Example #17
    def start(self, nodes, bucket, pnames, name, client_id='',
              collect_server_stats=True, ddoc=None, clusters=None):
        """This function starts collecting stats from all nodes with the given
        interval"""
        self._task = {"state": "running", "threads": [], "name": name,
                      "time": time.time(), "ops": [], "totalops": [],
                      "ops-temp": [], "latency": {}, "data_size_stats": []}
        rest = RestConnection(nodes[0])
        info = rest.get_nodes_self()
        self.data_path = info.storage[0].get_data_path()
        self.client_id = str(client_id)
        self.nodes = nodes
        self.bucket = bucket

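        # Spawn one daemon thread per stats collector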
        if collect_server_stats:
            self._task["threads"].append(
                Thread(target=self.membase_stats, name="membase")
            )
            self._task["threads"].append(
                Thread(target=self.system_stats, name="system", args=(pnames, ))
            )
            self._task["threads"].append(
                Thread(target=self.iostats, name="iostats")
            )
            self._task["threads"].append(
                Thread(target=self.ns_server_stats, name="ns_server")
            )
            self._task["threads"].append(
                Thread(target=self.get_bucket_size, name="bucket_size")
            )
            self._task["threads"].append(
                Thread(target=self.rebalance_progress, name="rebalance_progress")
            )
            if ddoc is not None:
                self._task["threads"].append(
                    Thread(target=self.indexing_time_stats, name="index_time", args=(ddoc, ))
                )
                self._task["threads"].append(
                    Thread(target=self.indexing_throughput_stats, name="index_thr")
                )
            if clusters:
                self.clusters = clusters
                self._task["threads"].append(
                    Thread(target=self.xdcr_lag_stats, name="xdcr_lag_stats")
                )

            for thread in self._task["threads"]:
                thread.daemon = True
                thread.start()

            # Getting build/machine stats from only one node in the cluster
            self.build_stats(nodes)
            self.machine_stats(nodes)

            # Start atop
            self.start_atop()
Example #18
 def tearDown(self):
     # catch panics and print it in the test log
     self.check_eventing_logs_for_panic()
     rest = RestConnection(self.master)
     buckets = rest.get_buckets()
     for bucket in buckets:
         stats = rest.get_bucket_stats(bucket)
         self.log.info("Bucket {} DGM is {}".format(
             bucket, stats["vb_active_resident_items_ratio"]))
     super(EventingBaseTest, self).tearDown()
Example #19
 def setUp(self):
     super(IndexManagementAPI, self).setUp()
     self.rest = RestConnection(self._cb_cluster.get_master_node())
     self.fts_rest = RestConnection(self._cb_cluster.get_random_fts_node())
     self.sample_bucket_name = "travel-sample"
     self.sample_index_name = "idx_travel_sample_fts"
     self.sample_index_name_1 = "idx_travel_sample_fts1"
     self.second_index = self._input.param("second_index", None)
     self.run_in_parallel = self._input.param("run_in_parallel", None)
     self.sample_query = {"match": "United States", "field": "country"}
Example #20
 def setup_ldap_config(self, server, param=None):
     """
     Sets up LDAP configuration on Couchbase
     :param param: (optional) LDAP config parameters
     :param server: server used for the REST connection to set up LDAP on the CB cluster
     """
     if param is None:
         param = self.get_ldap_params()
     rest = RestConnection(server)
     rest.setup_ldap(param, '')
 def print_go_routine_dump_from_all_eventing_nodes(self):
     eventing_nodes = self.get_nodes_from_services_map(
         service_type="eventing", get_all_nodes=True)
     for eventing_node in eventing_nodes:
         rest_conn = RestConnection(eventing_node)
         out = rest_conn.get_eventing_go_routine_dumps()
         log.info(
             "Go routine dumps for Node {0} is \n{1} ======================================================"
             "============================================================================================="
             "\n\n".format(eventing_node.ip, out))
 def rebalance_out_activevb0_node(self, master):
     pre_rebalance_uuid, _ = self.get_failover_log(master)
     self.log.info("Starting rebalance-out ...")
     # find which node contains vb0
     node = self.get_active_vb0_node(master)
     self.log.info("Node {0} contains active vb0".format(node))
     if node == self.src_master:
         self.src_cluster.rebalance_out_master()
         if master == node and node in self.src_nodes:
             self.src_nodes.remove(self.src_master)
         self.src_master = self.src_nodes[0]
         post_rebalance_uuid, _ = self.get_failover_log(
             self.get_active_vb0_node(self.src_master))
         self.log.info(
             "Remote uuid before rebalance :{0}, after rebalance : {1}".
             format(pre_rebalance_uuid, post_rebalance_uuid))
         # source rebalance on tap?
         if RestConnection(
                 self.src_master).get_internal_replication_type() == 'tap':
             self.assertTrue(
                 int(pre_rebalance_uuid) != int(post_rebalance_uuid),
                 "vb_uuid of vb0 is same before and after TAP rebalance")
         else:
             self.log.info("Current internal replication = UPR,hence vb_uuid did not change," \
                       "Subsequent _commit_for_checkpoints are expected to pass")
         self.sleep(self._wait_timeout)
         self.verify_next_checkpoint_passes()
     else:
         self.dest_cluster.rebalance_out_master()
         if master == node and node in self.dest_nodes:
             self.dest_nodes.remove(self.dest_master)
         self.dest_master = self.dest_nodes[0]
         post_rebalance_uuid, _ = self.get_failover_log(
             self.get_active_vb0_node(self.dest_master))
         self.log.info(
             "Remote uuid before rebalance :{0}, after rebalance : {1}".
             format(pre_rebalance_uuid, post_rebalance_uuid))
         # destination rebalance on tap?
         if RestConnection(
                 self.dest_master).get_internal_replication_type() == 'tap':
             self.assertTrue(
                 int(pre_rebalance_uuid) != int(post_rebalance_uuid),
                 "vb_uuid of vb0 is same before and after TAP rebalance")
             self.read_chkpt_history_new_vb0node()
             self.verify_next_checkpoint_fails_after_dest_uuid_change()
             self.sleep(self._wait_timeout * 2)
             self.verify_next_checkpoint_passes()
         else:
             self.log.info("Current internal replication = UPR,hence destination vb_uuid did not change," \
                       "Subsequent _commit_for_checkpoints are expected to pass")
             self.read_chkpt_history_new_vb0node()
             self.mutate_and_check_error404()
             # the replicator might still be awake, ensure adequate time gap
             self.sleep(self._wait_timeout * 2)
             self.verify_next_checkpoint_passes()
    def collect_stats(self):
        # TODO: fix hardcoded cluster names

        # Initialize rest connection to master and slave servers
        master_rest_conn = RestConnection(self.input.clusters[0][0])
        slave_rest_conn = RestConnection(self.input.clusters[1][0])

        # Define list of metrics and stats containers
        metrics = ('mem_used', 'curr_items', 'vb_active_ops_create',
                   'ep_bg_fetched', 'cpu_utilization_rate')
        stats = {'slave': defaultdict(list), 'master': defaultdict(list)}

        # Calculate approximate number of replicated items per node
        num_nodes = self.parami('num_nodes', 1) // 2
        total_items = self.parami('items', 1000000)
        items = 0.99 * total_items // num_nodes

        # Get the number of replicated items
        curr_items = self.get_samples(slave_rest_conn)['curr_items']

        # Collect stats until all items are replicated
        while curr_items[-1] < items:
            # Collect stats every 20 seconds
            time.sleep(19)

            # Slave stats
            samples = self.get_samples(slave_rest_conn)
            for metric in metrics:
                stats['slave'][metric].extend(samples[metric][:20])

            # Master stats
            samples = self.get_samples(master_rest_conn, 'nirvana')
            for metric in metrics:
                stats['master'][metric].extend(samples[metric][:20])

            # Update number of replicated items
            curr_items = stats['slave']['curr_items']

        # Aggregate and display stats
        vb_active_ops_create = sum(stats['slave']['vb_active_ops_create']) /\
            len(stats['slave']['vb_active_ops_create'])
        print("slave> AVG vb_active_ops_create: {0}, items/sec"\
            .format(vb_active_ops_create))

        ep_bg_fetched = sum(stats['slave']['ep_bg_fetched']) /\
            len(stats['slave']['ep_bg_fetched'])
        print("slave> AVG ep_bg_fetched: {0}, reads/sec".format(ep_bg_fetched))

        for server in stats:
            mem_used = max(stats[server]['mem_used'])
            print("{0}> MAX memory used: {1}, MB".format(
                server, mem_used // 1024**2))
            cpu_rate = sum(stats[server]['cpu_utilization_rate']) /\
                len(stats[server]['cpu_utilization_rate'])
            print("{0}> AVG CPU rate: {1}, %".format(server, cpu_rate))
 def setUp(self):
     super(EventingBucketCache, self).setUp()
     self.rest.set_service_memoryQuota(service='memoryQuota',
                                       memoryQuota=1400)
     if self.create_functions_buckets:
         self.bucket_size = 250
         log.info(self.bucket_size)
         bucket_params = self._create_bucket_params(
             server=self.server,
             size=self.bucket_size,
             replicas=self.num_replicas)
         self.cluster.create_standard_bucket(name=self.src_bucket_name,
                                             port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.src_bucket = RestConnection(self.master).get_buckets()
         self.cluster.create_standard_bucket(name=self.dst_bucket_name,
                                             port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.cluster.create_standard_bucket(name=self.metadata_bucket_name,
                                             port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.cluster.create_standard_bucket(name=self.dst_bucket_name1,
                                             port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.buckets = RestConnection(self.master).get_buckets()
     self.gens_load = self.generate_docs(self.docs_per_day)
     self.expiry = 3
     handler_code = self.input.param('handler_code', 'bucket_op')
     # index is required for delete operation through n1ql
     self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
     self.n1ql_helper = N1QLHelper(shell=self.shell,
                                   max_verify=self.max_verify,
                                   buckets=self.buckets,
                                   item_flag=self.item_flag,
                                   n1ql_port=self.n1ql_port,
                                   full_docs_list=self.full_docs_list,
                                   log=self.log,
                                   input=self.input,
                                   master=self.master,
                                   use_rest=True)
     self.n1ql_helper.create_primary_index(using_gsi=True,
                                           server=self.n1ql_node)
     if self.non_default_collection:
         self.create_scope_collection(bucket=self.src_bucket_name,
                                      scope=self.src_bucket_name,
                                      collection=self.src_bucket_name)
         self.create_scope_collection(bucket=self.metadata_bucket_name,
                                      scope=self.metadata_bucket_name,
                                      collection=self.metadata_bucket_name)
         self.create_scope_collection(bucket=self.dst_bucket_name,
                                      scope=self.dst_bucket_name,
                                      collection=self.dst_bucket_name)
         self.create_scope_collection(bucket=self.dst_bucket_name1,
                                      scope=self.dst_bucket_name1,
                                      collection=self.dst_bucket_name1)
 def get_disk_usage(self, node=None, index=None):
     rest_client = RestConnection(node)
     _, reclaimable_space = rest_client.get_fts_stats(
         index_name=index.name,
         bucket_name=index.source_bucket.name,
         stat_name="num_bytes_used_disk_by_root_reclaimable")
     _, num_root_filesegments = rest_client.get_fts_stats(
         index_name=index.name,
         bucket_name=index.source_bucket.name,
         stat_name="num_root_filesegments")
     return reclaimable_space, num_root_filesegments
 def __init__(self, servers, master):
     if servers is None or master is None:
         return
     self.servers = servers
     self.master = master
     self.eventing_nodes = self.get_nodes_from_services_map(
         service_type="eventing",
         servers=servers,
         master=master,
         get_all_nodes=True)
     self.eventing_rest = RestConnection(self.eventing_nodes[0])
Example #27
 def delete_request(self, client_context_id):
     """
     Deletes a request from CBAS
     """
     rest = RestConnection(self.cbas_node)
     try:
         status = rest.delete_active_request_on_cbas(client_context_id)
         self.log.info(status)
         return status
     except Exception as e:
         raise Exception(str(e))
Example #28
 def get_stats_value(self, name, expression):
     eventing_nodes = self.get_nodes_from_services_map(service_type="eventing", get_all_nodes=True)
     total_count = 0
     for eventing_node in eventing_nodes:
         rest_conn = RestConnection(eventing_node)
         stats = rest_conn.get_all_eventing_stats()
         keys = expression.split(".")
         for stat in stats:
             if stat["function_name"] == name:
                 total_count += stat[keys[0]][keys[1]]
     return total_count
 def delete_request(self, client_context_id):
     """
     Deletes a request from CBAS
     """
     rest = RestConnection(self.cbas_node)
     try:
         status = rest.delete_active_request_on_cbas(client_context_id)
          self.log.info(status)
          return status
      except Exception as e:
         raise Exception(str(e))
 def verify_eventing_results_of_all_functions(self,
                                              docs_expected,
                                              verify_results=True,
                                              timeout=600):
     if verify_results:
         # Verify the results of all the buckets
         self.verify_eventing_results(self.function_name,
                                      docs_expected,
                                      skip_stats_validation=True,
                                      timeout=timeout)
         self.verify_eventing_results(self.function_name,
                                      docs_expected,
                                      skip_stats_validation=True,
                                      bucket=self.dst_bucket_name1,
                                      timeout=timeout)
         self.verify_eventing_results(self.function_name,
                                      docs_expected,
                                      skip_stats_validation=True,
                                      bucket=self.dst_bucket_name2,
                                      timeout=timeout)
         self.verify_eventing_results(self.function_name,
                                      docs_expected,
                                      skip_stats_validation=True,
                                      bucket=self.dst_bucket_name3,
                                      timeout=timeout)
         if docs_expected == 0:
             self.verify_source_bucket_mutation(docs_expected,
                                                deletes=True,
                                                timeout=timeout,
                                                bucket=self.sbm_bucket)
         else:
             self.verify_source_bucket_mutation(docs_expected,
                                                timeout=timeout,
                                                bucket=self.sbm_bucket)
     else:
         # Just print the stats after sleeping for 10 mins. Required to get the latest stats.
         self.sleep(timeout)
         eventing_nodes = self.get_nodes_from_services_map(
             service_type="eventing", get_all_nodes=True)
         for eventing_node in eventing_nodes:
             rest_conn = RestConnection(eventing_node)
             out = rest_conn.get_all_eventing_stats()
             log.info("Stats for Node {0} is \n{1} ".format(
                 eventing_node.ip, json.dumps(out, sort_keys=True,
                                              indent=4)))
         for bucket in [
                 self.dst_bucket_name, self.dst_bucket_name1,
                 self.dst_bucket_name2
         ]:
             stats_dst = self.rest.get_bucket_stats(bucket)
             log.info(
                 "Number of docs in {0} bucket actual : {1} expected : {2} "
                 .format(bucket, stats_dst["curr_items"], docs_expected))
    def setUp(self):
        super(FTSReclaimableDiskSpace, self).setUp()

        self.default_group_name = "Group 1"
        self.n1ql = N1QLHelper(version="sherlock",
                               shell=None,
                               item_flag=None,
                               n1ql_port=8903,
                               full_docs_list=[],
                               log=self.log)
        self.rest = RestConnection(self._cb_cluster.get_master_node())
        self._cleanup_server_groups()
Example #32
def retrieve_data(context, path):
    """Retrieve json data from a couchbase server through REST calls"""
    # TODO: use cbtestlib
    server = Server(context.get("host", "127.0.0.1"))
    rest = RestConnection(server)
    api = rest.baseUrl + path

    try:
        status, content, header = rest._http_request(api)  #TODO: expose
    except ServerUnavailableException as e:
        logging.error("unable to retrieve data from %s: %s" % (server, e))
        return retrieve_meta(context, path)
    def test_set_maxttl_on_existing_bucket(self):
        """
        1. Create a bucket with no max_ttl
        2. Upload 1000 docs with exp = 100s
        3. Set maxTTL on bucket as 60s
        4. After 60s, run expiry pager, get item count, must be 1000
        5. After 40s, run expiry pager again and get item count, must be 0
        6. Now load another set of docs with exp = 100s
        7. Run expiry pager after 60s and get item count, must be 0
        """
        for bucket in self.buckets:
            self._load_json(bucket, self.num_items, exp=100)
        self._update_bucket_maxTTL(maxttl=60)

        self.sleep(60, "waiting before running expiry pager...")
        self.expire_pager(self.servers)
        self.sleep(20, "waiting for item count to come down...")
        for bucket in self.buckets:
            items = RestConnection(self.master).get_active_key_count(bucket)
            self.log.info(
                "Doc expiry set to = 100s, maxTTL = 60s"
                "(set after doc creation), after 60s, item count = {0}".format(
                    items))
            if items != self.num_items:
                self.fail(
                    "FAIL: Items with larger expiry before maxTTL updation deleted!"
                )

        self.sleep(40, "waiting before running expiry pager...")
        self.expire_pager(self.servers)
        self.sleep(20, "waiting for item count to come down...")
        for bucket in self.buckets:
            items = RestConnection(self.master).get_active_key_count(bucket)
            self.log.info("Doc expiry set to = 100s, maxTTL = 60s"
                          "(set after doc creation), after 100s,"
                          " item count = {0}".format(items))
            if items != 0:
                self.fail(
                    "FAIL: Items with not greater expiry set before maxTTL "
                    "updation not deleted after elapsed TTL!")
        for bucket in self.buckets:
            self._load_json(bucket, self.num_items, exp=100)

        self.sleep(60, "waiting before running expiry pager...")
        self.expire_pager(self.servers)
        self.sleep(20, "waiting for item count to come down...")
        for bucket in self.buckets:
            items = RestConnection(self.master).get_active_key_count(bucket)
            self.log.info("Doc expiry set to = 100s, maxTTL = 60s, after 100s,"
                          " item count = {0}".format(items))
            if items != 0:
                self.fail("FAIL: Items with not greater expiry not "
                          "deleted after elapsed maxTTL!")
Example #34
 def tearDown(self):
     # catch panics and print it in the test log
     self.check_eventing_logs_for_panic()
     rest = RestConnection(self.master)
     buckets = rest.get_buckets()
     for bucket in buckets:
         stats = rest.get_bucket_stats(bucket)
         self.log.info("Bucket {} DGM is {}".format(bucket, stats["vb_active_resident_items_ratio"]))
     self.hostname = self.input.param('host', 'https://postman-echo.com/')
     if self.hostname == 'local':
         self.teardown_curl()
     super(EventingBaseTest, self).tearDown()
Example #35
def retrieve_data(context, path):
    """Retrieve json data from a couchbase server through REST calls"""
    # TODO: use cbtestlib
    server = Server(context.get("host", "127.0.0.1"))
    rest = RestConnection(server)
    api = rest.baseUrl + path

    try:
        status, content, header = rest._http_request(api)  # TODO: expose
    except ServerUnavailableException as e:
        logging.error("unable to retrieve data from %s: %s" % (server, e))
        return retrieve_meta(context, path)
Example #36
    def test_maxttl_possible_values(self):
        """
        Test
        1. min - 0
        2. max - 2147483647
        3. default - 0
        4. negative values, date, string
        """
        # default
        rest = RestConnection(self.master)
        default_maxttl = rest.get_bucket_maxTTL()
        if default_maxttl != 0:
            self.fail("FAIL: default maxTTL if left unset must be 0 but is {0}".format(default_maxttl))
        self.log.info("Verified: default maxTTL if left unset is {0}".format(default_maxttl))

        # max value
        try:
            self._update_bucket_maxTTL(maxttl=2147483648)
        except Exception as e:
            self.log.info("Expected exception : {0}".format(e))
            try:
                self._update_bucket_maxTTL(maxttl=2147483647)
            except Exception as e:
                self.fail("Unable to set maxTTL=2147483647, the max permitted value")
            else:
                self.log.info("Verified: Max value permitted is 2147483647")
        else:
            self.fail("Able to set maxTTL greater than 2147483647")

        # min value
        try:
            self._update_bucket_maxTTL(maxttl=0)
        except Exception as e:
            self.fail("Unable to set maxTTL=0, the min permitted value")
        else:
            self.log.info("Verified: Min value permitted is 0")

        # negative value
        try:
            self._update_bucket_maxTTL(maxttl=-60)
        except Exception as e:
            self.log.info("Verified: negative values not permitted, exception : {0}".format(e))
        else:
            self.fail("FAIL: Able to set a negative maxTTL")

        # date/string
        try:
            self._update_bucket_maxTTL(maxttl="12/23/2016")
        except Exception as e:
            self.log.info("Verified: string not permitted, exception : {0}".format(e))
        else:
            self.fail("FAIL: Able to set a date string maxTTL")
 def perform_compaction(self, server=None, index=None, timeout=120):
     import time
     rest = RestConnection(server)
     rest.start_fts_index_compaction(index_name=index.name)
     done = False
     start_time = time.time()
     while not done:
         if time.time() - start_time > timeout:
             self.fail("Compaction operation is failed due to timeout.")
         _, compactions_state_content = rest.get_fts_index_compactions(
             index.name)
         done = 'In progress' not in str(compactions_state_content)
         self.sleep(1)
Example #38
 def reload_node_cert_with_plain_password(self, node):
     params = dict()
     params["privateKeyPassphrase"] = dict()
     params["privateKeyPassphrase"]["type"] = "plain"
     params["privateKeyPassphrase"]["password"] = self.plain_passw_map[str(
         node.ip)]
     params = json.dumps(params)
     rest = RestConnection(node)
     status, content = rest.reload_certificate(params=params)
     if not status:
         msg = "Could not load reload node cert on %s; Failed with error %s" \
               % (node.ip, content)
         raise Exception(msg)
Example #39
 def validate_items_count(self):
     items_GleambookUsers = RestConnection(self.query_node).query_tool(
         'select count(*) from GleambookUsers')['results'][0]['$1']
     items_GleambookMessages = RestConnection(self.query_node).query_tool(
         'select count(*) from GleambookMessages')['results'][0]['$1']
     items_ChirpMessages = RestConnection(self.query_node).query_tool(
         'select count(*) from ChirpMessages')['results'][0]['$1']
     self.log.info("Items in CB GleanBookUsers bucket: %s" %
                   items_GleambookUsers)
     self.log.info("Items in CB GleambookMessages bucket: %s" %
                   items_GleambookMessages)
     self.log.info("Items in CB ChirpMessages bucket: %s" %
                   items_ChirpMessages)
 def setUp(self):
     super(EventingVolume, self).setUp()
     self.rest.set_service_memoryQuota(service='memoryQuota',
                                       memoryQuota=2800)
     if self.create_functions_buckets:
         self.bucket_size = 1000
         self.metadata_bucket_size = 300
         self.replicas = 0
         bucket_params = self._create_bucket_params(server=self.server,
                                                    size=1500,
                                                    replicas=self.replicas)
         bucket_params_meta = self._create_bucket_params(
             server=self.server,
             size=self.metadata_bucket_size,
             replicas=self.replicas)
         self.cluster.create_standard_bucket(name=self.src_bucket_name,
                                             port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.src_bucket = RestConnection(self.master).get_buckets()
         bucket_params = self._create_bucket_params(server=self.server,
                                                    size=1000,
                                                    replicas=self.replicas)
         self.cluster.create_standard_bucket(name=self.dst_bucket_name,
                                             port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.cluster.create_standard_bucket(
             name=self.metadata_bucket_name,
             port=STANDARD_BUCKET_PORT + 1,
             bucket_params=bucket_params_meta)
         self.buckets = RestConnection(self.master).get_buckets()
         self.hostname = "http://qa.sc.couchbase.com/"
         self.create_n_scope(self.dst_bucket_name, 5)
         self.create_n_scope(self.src_bucket_name, 5)
         self.create_n_collections(self.dst_bucket_name, "scope_1", 5)
         self.create_n_collections(self.src_bucket_name, "scope_1", 5)
         self.handler_code = "handler_code/ABO/insert_rebalance.js"
     # index is required for delete operation through n1ql
     self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
     self.n1ql_helper = N1QLHelper(shell=self.shell,
                                   max_verify=self.max_verify,
                                   buckets=self.buckets,
                                   item_flag=self.item_flag,
                                   n1ql_port=self.n1ql_port,
                                   full_docs_list=self.full_docs_list,
                                   log=self.log,
                                   input=self.input,
                                   master=self.master,
                                   use_rest=True)
     self.n1ql_helper.create_primary_index(using_gsi=True,
                                           server=self.n1ql_node)
     self.batch_size = 10**4
Example #41
 def setUp(self):
     super(EventingBucket, self).setUp()
     self.rest.set_service_memoryQuota(service='memoryQuota',
                                       memoryQuota=500)
     if self.create_functions_buckets:
         self.bucket_size = 100
         log.info(self.bucket_size)
         bucket_params = self._create_bucket_params(
             server=self.server,
             size=self.bucket_size,
             replicas=self.num_replicas)
         self.cluster.create_standard_bucket(name=self.src_bucket_name,
                                             port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.src_bucket = RestConnection(self.master).get_buckets()
         self.cluster.create_standard_bucket(name=self.dst_bucket_name,
                                             port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.cluster.create_standard_bucket(name=self.metadata_bucket_name,
                                             port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.buckets = RestConnection(self.master).get_buckets()
     self.gens_load = self.generate_docs(self.docs_per_day)
     self.expiry = 3
     handler_code = self.input.param('handler_code', 'bucket_op')
     if handler_code == 'bucket_op':
         self.handler_code = HANDLER_CODE.DELETE_BUCKET_OP_ON_DELETE
     elif handler_code == 'bucket_op_with_timers':
         self.handler_code = HANDLER_CODE.BUCKET_OPS_WITH_TIMERS
     elif handler_code == 'bucket_op_with_cron_timers':
         self.handler_code = HANDLER_CODE.BUCKET_OPS_WITH_CRON_TIMERS
     elif handler_code == 'n1ql_op_with_timers':
         # index is required for delete operation through n1ql
         self.n1ql_node = self.get_nodes_from_services_map(
             service_type="n1ql")
         self.n1ql_helper = N1QLHelper(shell=self.shell,
                                       max_verify=self.max_verify,
                                       buckets=self.buckets,
                                       item_flag=self.item_flag,
                                       n1ql_port=self.n1ql_port,
                                       full_docs_list=self.full_docs_list,
                                       log=self.log,
                                       input=self.input,
                                       master=self.master,
                                       use_rest=True)
         self.n1ql_helper.create_primary_index(using_gsi=True,
                                               server=self.n1ql_node)
         self.handler_code = HANDLER_CODE.N1QL_OPS_WITH_TIMERS
     else:
         self.handler_code = HANDLER_CODE.DELETE_BUCKET_OP_ON_DELETE
 def get_services_map(self, reset=True, master=None):
     if not reset:
         return
     else:
         self.services_map = {}
     if not master:
         master = self.master
     rest = RestConnection(master)
     map = rest.get_nodes_services()
     for key, val in map.items():
         for service in val:
             if service not in self.services_map.keys():
                 self.services_map[service] = []
             self.services_map[service].append(key)
Example #43
    def setUp(self):
        super(CBASBaseTest, self).setUp()
        self.cbas_node = self.input.cbas
        self.analytics_helper = AnalyticsHelper()
        self._cb_cluster = self.cluster
        self.travel_sample_docs_count = 31591
        self.beer_sample_docs_count = 7303
        invalid_ip = '10.111.151.109'
        self.cb_bucket_name = self.input.param('cb_bucket_name',
                                               'travel-sample')
        self.cbas_bucket_name = self.input.param('cbas_bucket_name', 'travel')
        self.cb_bucket_password = self.input.param('cb_bucket_password', '')
        self.expected_error = self.input.param("error", None)

        if self.expected_error:
            self.expected_error = self.expected_error.replace(
                "INVALID_IP", invalid_ip)
            self.expected_error = self.expected_error.replace(
                "PORT", self.master.port)

        self.cb_server_ip = self.input.param("cb_server_ip", self.master.ip)
        self.cb_server_ip = self.cb_server_ip.replace('INVALID_IP', invalid_ip)
        self.cbas_dataset_name = self.input.param("cbas_dataset_name",
                                                  'travel_ds')
        self.cbas_bucket_name_invalid = self.input.param(
            'cbas_bucket_name_invalid', self.cbas_bucket_name)
        self.cbas_dataset2_name = self.input.param('cbas_dataset2_name', None)
        self.skip_create_dataset = self.input.param('skip_create_dataset',
                                                    False)
        self.disconnect_if_connected = self.input.param(
            'disconnect_if_connected', False)
        self.cbas_dataset_name_invalid = self.input.param(
            'cbas_dataset_name_invalid', self.cbas_dataset_name)
        self.skip_drop_connection = self.input.param('skip_drop_connection',
                                                     False)
        self.skip_drop_dataset = self.input.param('skip_drop_dataset', False)

        self.query_id = self.input.param('query_id', None)
        self.mode = self.input.param('mode', None)
        self.num_concurrent_queries = self.input.param('num_queries', 5000)
        self.concurrent_batch_size = self.input.param('concurrent_batch_size',
                                                      100)
        self.compiler_param = self.input.param('compiler_param', None)
        self.compiler_param_val = self.input.param('compiler_param_val', None)
        self.expect_reject = self.input.param('expect_reject', False)
        self.expect_failure = self.input.param('expect_failure', False)

        self.rest = RestConnection(self.master)
        # Drop any existing buckets and datasets
        self.cleanup_cbas()
 def setUp(self):
     super(EventingVolume, self).setUp()
     # Un-deploy and delete all the functions
     self.undeploy_delete_all_functions()
     self.dst_bucket_name2 = self.input.param('dst_bucket_name2', 'dst_bucket2')
     self.dst_bucket_name3 = self.input.param('dst_bucket_name3', 'dst_bucket3')
      self.sbm_bucket = self.input.param('sbm_bucket', 'sbm_bucket')
     self.worker_count = self.input.param('worker_count', 3)
     self.cpp_worker_thread_count = self.input.param('cpp_worker_thread_count', 3)
     self.rest.set_service_memoryQuota(service='memoryQuota', memoryQuota=2800)
     if self.create_functions_buckets:
         self.bucket_size = 300
         # self.meta_bucket_size = 500
         # self.bucket_size = 600
         self.meta_bucket_size = 100
         bucket_params = self._create_bucket_params(server=self.server, size=self.bucket_size,
                                                    replicas=self.num_replicas)
         bucket_params_meta = self._create_bucket_params(server=self.server, size=self.meta_bucket_size,
                                                         replicas=self.num_replicas)
         self.cluster.create_standard_bucket(name=self.src_bucket_name, port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.cluster.create_standard_bucket(name=self.sbm_bucket, port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.src_bucket = RestConnection(self.master).get_buckets()
         self.cluster.create_standard_bucket(name=self.dst_bucket_name, port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.cluster.create_standard_bucket(name=self.dst_bucket_name1, port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.cluster.create_standard_bucket(name=self.dst_bucket_name2, port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.cluster.create_standard_bucket(name=self.dst_bucket_name3, port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.cluster.create_standard_bucket(name=self.metadata_bucket_name, port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params_meta)
         self.buckets = RestConnection(self.master).get_buckets()
     self.gens_load = self.generate_docs(self.docs_per_day)
     self.batch_size = 1000000
      # a primary index is required for delete operations issued through n1ql (see the illustrative query after this setUp)
     self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
     self.n1ql_helper = N1QLHelper(shell=self.shell,
                                   max_verify=self.max_verify,
                                   buckets=self.buckets,
                                   item_flag=self.item_flag,
                                   n1ql_port=self.n1ql_port,
                                   full_docs_list=self.full_docs_list,
                                   log=self.log, input=self.input,
                                   master=self.master,
                                   use_rest=True
                                   )
     self.n1ql_helper.create_primary_index(using_gsi=True, server=self.n1ql_node)
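
The primary index created just above is what makes N1QL-based deletes possible: a DELETE with a WHERE clause needs an index to select the documents to remove. A minimal illustrative sketch follows; the query text and the run_cbq_query call are assumptions, since the real statement lives inside the deployed handler code:

    # illustrative only -- the actual DELETE is issued from the eventing handler
    delete_query = "DELETE FROM {0} WHERE meta().id LIKE 'emp%'".format(self.src_bucket_name)
    self.n1ql_helper.run_cbq_query(query=delete_query, server=self.n1ql_node)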
    def create_required_buckets(self):
        self.log.info("Get the available memory quota")
        bucket_util = bucket_utils(self.master)
        self.info = bucket_util.rest.get_nodes_self()
        threshold_memory = 0
        total_memory_in_mb = self.info.memoryFree // (1024 ** 2)
        total_available_memory_in_mb = total_memory_in_mb
        active_service = self.info.services

        if "index" in active_service:
            total_available_memory_in_mb -= self.info.indexMemoryQuota
        if "fts" in active_service:
            total_available_memory_in_mb -= self.info.ftsMemoryQuota
        if "cbas" in active_service:
            total_available_memory_in_mb -= self.info.cbasMemoryQuota
        if "eventing" in active_service:
            total_available_memory_in_mb -= self.info.eventingMemoryQuota

        self.log.info("Total free memory in MB: {0}".format(total_memory_in_mb))
        available_memory = total_available_memory_in_mb - threshold_memory
        self.rest.set_service_memoryQuota(service='memoryQuota',
                                          memoryQuota=available_memory)
        self.rest.set_service_memoryQuota(service='cbasMemoryQuota',
                                          memoryQuota=available_memory - 1024)
        self.rest.set_service_memoryQuota(service='indexMemoryQuota',
                                          memoryQuota=available_memory - 1024)

        self.log.info("Create CB buckets")

        self.create_bucket(self.master,
                           "GleambookUsers",
                           bucket_ram=available_memory // 3)
        self.create_bucket(self.master,
                           "GleambookMessages",
                           bucket_ram=available_memory // 3)
        shell = RemoteMachineShellConnection(self.master)
        command = 'curl -i -u Administrator:password --data \'ns_bucket:update_bucket_props("ChirpMessages", [{extra_config_string, "cursor_dropping_upper_mark=70;cursor_dropping_lower_mark=50"}]).\' http://%s:8091/diag/eval' % self.master.ip
        shell.execute_command(command)
        command = 'curl -i -u Administrator:password --data \'ns_bucket:update_bucket_props("GleambookMessages", [{extra_config_string, "cursor_dropping_upper_mark=70;cursor_dropping_lower_mark=50"}]).\' http://%s:8091/diag/eval' % self.master.ip
        shell.execute_command(command)

        result = RestConnection(self.query_node).query_tool(
            "CREATE PRIMARY INDEX idx_GleambookUsers ON GleambookUsers;")
        self.sleep(10, "wait for index creation.")
        self.assertTrue(result['status'] == "success")

        result = RestConnection(self.query_node).query_tool(
            "CREATE PRIMARY INDEX idx_GleambookMessages ON GleambookMessages;")
        self.sleep(10, "wait for index creation.")
        self.assertTrue(result['status'] == "success")
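
To make the quota arithmetic above concrete, here is a worked example with purely illustrative numbers:

    # assume 8192 MB free, indexMemoryQuota = 512 MB, cbasMemoryQuota = 1024 MB, no fts/eventing:
    #   total_available_memory_in_mb = 8192 - 512 - 1024 = 6656
    #   memoryQuota (KV)   -> 6656 MB
    #   cbasMemoryQuota    -> 6656 - 1024 = 5632 MB
    #   indexMemoryQuota   -> 6656 - 1024 = 5632 MB
    #   each bucket's RAM  -> 6656 // 3 = 2218 MB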
Example #46
0
def perform_cb_collect(_input, log_path=None):
    import logger
    log = logger.Logger.get_logger()
    for node in _input.servers:
        params = dict()
        if len(_input.servers) != 1:
            params['nodes'] = 'ns_1@' + node.ip
        else:
            # In case of single node we have to pass ip as below
            params['nodes'] = 'ns_1@' + '127.0.0.1'

        log.info('Collecting log on node ' + node.ip)
        rest = RestConnection(node)
        status, _, _ = rest.perform_cb_collect(params)
        time.sleep(10)  # it takes a few seconds before the collection starts
        log.info('CB collect status on %s is %s' % (node.ip, status))

        log.info('Polling active task endpoint to check CB collect status')
        if status is True:
            cb_collect_response = {}
            while True:
                content = rest.active_tasks()
                for response in content:
                    if response['type'] == 'clusterLogsCollection':
                        cb_collect_response = response
                        break
                if cb_collect_response['status'] == 'completed':
                    log.info(cb_collect_response)
                    break
                else:
                    time.sleep(10)  # CB collect in progress, wait for 10 seconds and check progress again

            log.info('Copy CB collect ZIP file to Client')
            remote_client = RemoteMachineShellConnection(node)
            cb_collect_path = cb_collect_response['perNode'][
                params['nodes']]['path']
            zip_file_copied = remote_client.get_file(
                os.path.dirname(cb_collect_path),
                os.path.basename(cb_collect_path), log_path)
            log.info('%s node cb collect zip copied to client : %s' %
                     (node.ip, zip_file_copied))

            if zip_file_copied:
                remote_client.execute_command("rm -f %s" % cb_collect_path)
                remote_client.disconnect()
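
A minimal call-site sketch for perform_cb_collect; the TestInput import path and the local /tmp directory are assumptions:

    from TestInput import TestInputSingleton  # assumed import path used by this test framework

    # collect cbcollect_info from every node in the test config and copy the zips locally
    perform_cb_collect(TestInputSingleton.input, log_path="/tmp/cb_collect_logs")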
 def setUp(self):
     super(EventingUpgrade, self).setUp()
     self.rest = RestConnection(self.master)
     self.server = self.master
     self.queue = queue.Queue()
     self.src_bucket_name = self.input.param('src_bucket_name', 'src_bucket')
     self.eventing_log_level = self.input.param('eventing_log_level', 'INFO')
     self.dst_bucket_name = self.input.param('dst_bucket_name', 'dst_bucket')
     self.dst_bucket_name1 = self.input.param('dst_bucket_name1', 'dst_bucket1')
     self.dst_bucket_curl = self.input.param('dst_bucket_curl', 'dst_bucket_curl')
     self.source_bucket_mutation = self.input.param('source_bucket_mutation', 'source_bucket_mutation')
     self.metadata_bucket_name = self.input.param('metadata_bucket_name', 'metadata')
      self.n1ql_op_dst = self.input.param('n1ql_op_dst', 'n1ql_op_dst')
     self.gens_load = self.generate_docs(self.docs_per_day)
     self.upgrade_version = self.input.param("upgrade_version")
Example #48
0
    def test_config_settings(self):
        config = self._input.param("config", "bleveMaxResultWindow")
        value = self._input.param("value", 100000)
        rest = RestConnection(self._cb_cluster.get_fts_nodes()[0])

        rest.set_node_setting(config, value)
        global_vars.system_event_logs.add_event(
            SearchServiceEvents.fts_settings_updated())

        verification_config_setting = rest.get_node_settings(config)

        if str(verification_config_setting) != str(value):
            self.fail(
                f'{verification_config_setting} is not equal to expected {value} for config:{config}'
            )
Example #49
0
 def change_retry_rebalance_settings(self, enabled=True,
                                     afterTimePeriod=300, maxAttempts=1):
     # build the body
     body = dict()
     if enabled:
         body["enabled"] = "true"
     else:
         body["enabled"] = "false"
     body["afterTimePeriod"] = afterTimePeriod
     body["maxAttempts"] = maxAttempts
     rest = RestConnection(self.master)
     rest.set_retry_rebalance_settings(body)
     result = rest.get_retry_rebalance_settings()
     self.log.info("Retry Rebalance settings changed to : {0}"
                   .format(json.loads(result)))
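
A short usage sketch for the helper above; the values are illustrative:

    # retry a failed rebalance up to 3 times, waiting 120 seconds between attempts
    self.change_retry_rebalance_settings(enabled=True, afterTimePeriod=120, maxAttempts=3)
    # disable retries again once the scenario is done
    self.change_retry_rebalance_settings(enabled=False)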
Example #50
0
    def start(self, nodes, bucket, pnames, name, frequency, client_id='',
              collect_server_stats=True, ddoc=None):
        """This function starts collecting stats from all nodes with the given
        frequency"""
        self._task = {"state": "running", "threads": [], "name": name,
                      "time": time.time(), "ops": [], "totalops": [],
                      "ops-temp": [], "latency": {}, "data_size_stats": []}
        rest = RestConnection(nodes[0])
        info = rest.get_nodes_self()
        self.data_path = info.storage[0].get_data_path()
        self.client_id = str(client_id)
        self.nodes = nodes

        if collect_server_stats:
            mbstats_thread = Thread(target=self.membase_stats,
                                    args=(nodes, bucket, 60, self._verbosity))
            mbstats_thread.start()
            sysstats_thread = Thread(target=self.system_stats,
                                     args=(nodes, pnames, frequency, self._verbosity))
            sysstats_thread.start()
            iostats_thread = Thread(target=self.iostats,
                                    args=(nodes, 10, self._verbosity))
            iostats_thread.start()
            ns_server_stats_thread = Thread(target=self.ns_server_stats,
                                            args=(nodes, bucket, 60))
            ns_server_stats_thread.start()
            bucket_size_thead = Thread(target=self.get_bucket_size,
                                       args=(bucket, nodes, frequency))
            bucket_size_thead.start()

            self._task["threads"] = [sysstats_thread, ns_server_stats_thread,
                                     bucket_size_thead, mbstats_thread]
            if ddoc is not None:
                view_stats_thread = Thread(target=self.collect_indexing_stats,
                                           args=(nodes, bucket, ddoc, frequency))
                indexing_stats_thread = Thread(target=self.measure_indexing_throughput,
                                               args=(nodes, ))
                view_stats_thread.start()
                indexing_stats_thread.start()
                self._task["threads"].append(view_stats_thread)
                self._task["threads"].append(indexing_stats_thread)

            # Getting build/machine stats from only one node in the cluster
            self.build_stats(nodes)
            self.machine_stats(nodes)

            # Start atop
            self.start_atop()
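
A hedged call-site sketch for start(); the collector class name, its constructor, and the process names are assumptions, since only the method itself is shown here:

    # hypothetical wiring -- the real collector class/constructor may differ
    collector = StatsCollector(verbosity=0)
    collector.start(nodes=self.servers, bucket="default",
                    pnames=["memcached", "beam.smp"], name="access-phase",
                    frequency=60, collect_server_stats=True)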
Example #51
0
 def setUp(self):
     if self._testMethodDoc:
         log.info("\n\nStarting Test: %s \n%s" % (self._testMethodName, self._testMethodDoc))
     else:
         log.info("\n\nStarting Test: %s" % (self._testMethodName))
     self.input = TestInputSingleton.input
     self.input.test_params.update({"default_bucket": False})
     super(EventingBaseTest, self).setUp()
     self.master = self.servers[0]
     self.server = self.master
     self.restServer = self.get_nodes_from_services_map(service_type="eventing")
     self.rest = RestConnection(self.restServer)
     self.log.info(
         "Setting the min possible memory quota so that adding mode nodes to the cluster wouldn't be a problem.")
     self.rest.set_service_memoryQuota(service='memoryQuota', memoryQuota=330)
     self.rest.set_service_memoryQuota(service='indexMemoryQuota', memoryQuota=INDEX_QUOTA)
     # self.rest.set_service_memoryQuota(service='eventingMemoryQuota', memoryQuota=EVENTING_QUOTA)
     self.src_bucket_name = self.input.param('src_bucket_name', 'src_bucket')
     self.eventing_log_level = self.input.param('eventing_log_level', 'TRACE')
     self.dst_bucket_name = self.input.param('dst_bucket_name', 'dst_bucket')
     self.dst_bucket_name1 = self.input.param('dst_bucket_name1', 'dst_bucket1')
     self.metadata_bucket_name = self.input.param('metadata_bucket_name', 'metadata')
     self.create_functions_buckets = self.input.param('create_functions_buckets', True)
     self.docs_per_day = self.input.param("doc-per-day", 1)
      random.seed()  # seed from the system time so each run generates different random function names
     self.function_name = "Function_{0}_{1}".format(random.randint(1, 1000000000), self._testMethodName)
 def set_indexer_logLevel(self, loglevel="info"):
     """
     :param loglevel:
     Possible Values
         -- info
         -- debug
         -- warn
         -- verbose
         -- Silent
         -- Fatal
         -- Error
         -- Timing
         -- Trace
     """
     self.log.info("Setting indexer log level to {0}".format(loglevel))
     server = self.get_nodes_from_services_map(service_type="index")
     rest = RestConnection(server)
     status = rest.set_indexer_params("logLevel", loglevel)
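
Usage is a one-liner; any of the levels listed in the docstring above can be passed, for example:

    self.set_indexer_logLevel("debug")   # more detail while chasing an indexer issue
    self.set_indexer_logLevel("info")    # back to the default level afterwards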
Example #53
0
 def rebalance_progress(self, interval=15):
     self._task["rebalance_progress"] = list()
     nodes = cycle(self.nodes)
      rest = RestConnection(next(nodes))
      while not self._aborted():
          try:
              tasks = rest.ns_server_tasks()
          except ServerUnavailableException as error:
              log.error(error)
              rest = RestConnection(next(nodes))
             continue
         for task in tasks:
             if task["type"] == "rebalance":
                 self._task["rebalance_progress"].append({
                     "rebalance_progress": task.get("progress", 0),
                     "timestamp": time.time()
                 })
                 break
         time.sleep(interval)
Example #54
0
 def verify_eventing_results_of_all_functions(self, docs_expected, verify_results=True):
     if verify_results:
         # Verify the results of all the buckets
         self.verify_eventing_results(self.function_name, docs_expected, skip_stats_validation=True)
         self.verify_eventing_results(self.function_name, docs_expected, skip_stats_validation=True,
                                      bucket=self.dst_bucket_name1)
         self.verify_eventing_results(self.function_name, docs_expected, skip_stats_validation=True,
                                      bucket=self.dst_bucket_name2)
     else:
         # Just print the stats after sleeping for 10 mins. Required to get the latest stats.
         self.sleep(600)
         eventing_nodes = self.get_nodes_from_services_map(service_type="eventing", get_all_nodes=True)
         for eventing_node in eventing_nodes:
             rest_conn = RestConnection(eventing_node)
             out = rest_conn.get_all_eventing_stats()
             log.info("Stats for Node {0} is \n{1} ".format(eventing_node.ip, json.dumps(out, sort_keys=True,
                                                                                         indent=4)))
         for bucket in [self.dst_bucket_name, self.dst_bucket_name1, self.dst_bucket_name2]:
             stats_dst = self.rest.get_bucket_stats(bucket)
             log.info("Number of docs in {0} bucket actual : {1} expected : {2} ".format(bucket,
                                                                                         stats_dst["curr_items"],
                                                                                         docs_expected))
Example #55
0
    def collect_indexing_stats(self, nodes, bucket, ddoc, frequency):
        """Collect view indexing stats"""
        self._task['view_info'] = list()

        while not self._aborted():
            time.sleep(frequency)
            log.info("Collecting view indexing stats")
            for node in nodes:
                rest = RestConnection(node)
                data = rest.set_view_info(bucket, ddoc)
                update_history = data[1]['stats']['update_history']
                try:
                    indexing_time = \
                        [event['indexing_time'] for event in update_history]
                    avg_time = sum(indexing_time) / len(indexing_time)
                except (IndexError, KeyError):
                    avg_time = 0
                finally:
                    self._task['view_info'].append({'node': node.ip,
                                                    'indexing_time': avg_time,
                                                    'timestamp': time.time()})

        log.info("Finished collecting view indexing stats")
 def test_fts_log_redaction(self):
     gen_create = BlobGenerator('logredac', 'logredac-', self.value_size, end=self.num_items)
     self._load_all_buckets(self.master, gen_create, "create", 0)
     index_definition = {
         "type": "fulltext-index",
         "name": "index1",
         "sourceType": "couchbase",
         "sourceName": "default"
     }
     rest = RestConnection(self.master)
     status = rest.create_fts_index("index1", index_definition)
     if status:
         log.info("Index 'index1' created")
     else:
         log.info("Error creating index, status = {0}".format(status))
     self.sleep(60, "waiting for docs to get indexed")
     query_json = {"query": {"field": "type", "match": "emp"}}
     hits, _, _, _ = rest.run_fts_query(index_name="index1",
                        query_json=query_json)
     log.info("Hits from query {0}: {1}".format(query_json, hits))
     self.set_redaction_level()
     self.start_logs_collection()
     result = self.monitor_logs_collection()
     try:
         logs_path = result["perNode"]["ns_1@" + str(self.master.ip)]["path"]
     except KeyError:
         logs_path = result["perNode"]["[email protected]"]["path"]
     redactFileName = logs_path.split('/')[-1]
     nonredactFileName = logs_path.split('/')[-1].replace('-redacted', '')
     remotepath = logs_path[0:logs_path.rfind('/') + 1]
     self.verify_log_files_exist(remotepath=remotepath,
                                 redactFileName=redactFileName,
                                 nonredactFileName=nonredactFileName)
     self.verify_log_redaction(remotepath=remotepath,
                               redactFileName=redactFileName,
                               nonredactFileName=nonredactFileName,
                               logFileName="ns_server.fts.log")
    def test_cbcollect_with_redaction_enabled_with_xdcr(self):
        rest_src = RestConnection(self.master)
        rest_src.remove_all_replications()
        rest_src.remove_all_remote_clusters()

        rest_dest = RestConnection(self.servers[1])
        rest_dest_helper = RestHelper(rest_dest)

        try:
            rest_src.remove_all_replications()
            rest_src.remove_all_remote_clusters()
            self.set_redaction_level()
            rest_src.add_remote_cluster(self.servers[1].ip, self.servers[1].port,
                                        self.servers[1].rest_username,
                                        self.servers[1].rest_password, "C2")

            """ at dest cluster """
            self.add_built_in_server_user(node=self.servers[1])
            rest_dest.create_bucket(bucket='default', ramQuotaMB=512)
            bucket_ready = rest_dest_helper.vbucket_map_ready('default')
            if not bucket_ready:
                self.fail("Bucket default at dest not created after 120 seconds.")
            repl_id = rest_src.start_replication('continuous', 'default', "C2")
            if repl_id is not None:
                self.log.info("Replication created successfully")
            gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
            tasks = self._async_load_all_buckets(self.master, gen, "create", 0)
            for task in tasks:
                task.result()
            self.sleep(10)

            """ enable firewall """
            if self.interrupt_replication:
                RemoteUtilHelper.enable_firewall(self.master, xdcr=True)

            """ start collect logs """
            self.start_logs_collection()
            result = self.monitor_logs_collection()
            """ verify logs """
            try:
                logs_path = result["perNode"]["ns_1@" + str(self.master.ip)]["path"]
            except KeyError:
                logs_path = result["perNode"]["ns_1@127.0.0.1"]["path"]
            redactFileName = logs_path.split('/')[-1]
            nonredactFileName = logs_path.split('/')[-1].replace('-redacted', '')
            remotepath = logs_path[0:logs_path.rfind('/')+1]
            self.verify_log_files_exist(remotepath=remotepath,
                                    redactFileName=redactFileName,
                                    nonredactFileName=nonredactFileName)
            self.log.info("Verify on log ns_server.goxdcr.log")
            self.verify_log_redaction(remotepath=remotepath,
                                  redactFileName=redactFileName,
                                  nonredactFileName=nonredactFileName,
                                  logFileName="ns_server.goxdcr.log")
        finally:
            """ clean up xdcr """
            rest_dest.delete_bucket()
            rest_src.remove_all_replications()
            rest_src.remove_all_remote_clusters()
            if self.interrupt_replication:
                shell = RemoteMachineShellConnection(self.master)
                shell.disable_firewall()
                shell.disconnect()
Example #58
0
 def build_info(node):
     rest = RestConnection(node)
     api = rest.baseUrl + 'nodes/self'
     status, content, header = rest._http_request(api)
     json_parsed = json.loads(content)
     return json_parsed
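
A small sketch of how build_info might be consumed; treat the field names as assumptions, since the exact nodes/self payload varies by server version:

    info = build_info(self.master)
    # 'version' is typically present in the nodes/self response
    log.info("Node %s is running build %s" % (self.master.ip, info.get("version", "unknown")))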
    def setUp(self, add_default_cbas_node=True):
        self.log = logger.Logger.get_logger()
        if self._testMethodDoc:
            self.log.info("\n\nStarting Test: %s \n%s"%(self._testMethodName,self._testMethodDoc))
        else:
            self.log.info("\n\nStarting Test: %s"%(self._testMethodName))
        super(CBASBaseTest, self).setUp()
        self.cbas_node = self.input.cbas
        self.cbas_servers = []
        self.kv_servers = []
 
        for server in self.servers:
            if "cbas" in server.services:
                self.cbas_servers.append(server)
            if "kv" in server.services:
                self.kv_servers.append(server)
        
        self.analytics_helper = AnalyticsHelper()
        self._cb_cluster = self.cluster
        self.travel_sample_docs_count = 31591
        self.beer_sample_docs_count = 7303
        invalid_ip = '10.111.151.109'
        self.cb_bucket_name = self.input.param('cb_bucket_name', 'travel-sample')
        self.cbas_bucket_name = self.input.param('cbas_bucket_name', 'travel')
        self.cb_bucket_password = self.input.param('cb_bucket_password', None)
        self.expected_error = self.input.param("error", None)
        if self.expected_error:
            self.expected_error = self.expected_error.replace("INVALID_IP",invalid_ip)
            self.expected_error = self.expected_error.replace("PORT",self.master.port)
        self.cb_server_ip = self.input.param("cb_server_ip", None)
        self.cb_server_ip = self.cb_server_ip.replace('INVALID_IP',invalid_ip) if self.cb_server_ip is not None else None
        self.cbas_dataset_name = self.input.param("cbas_dataset_name", 'travel_ds')
        self.cbas_bucket_name_invalid = self.input.param('cbas_bucket_name_invalid', self.cbas_bucket_name)
        self.cbas_dataset2_name = self.input.param('cbas_dataset2_name', None)
        self.skip_create_dataset = self.input.param('skip_create_dataset', False)
        self.disconnect_if_connected = self.input.param('disconnect_if_connected', False)
        self.cbas_dataset_name_invalid = self.input.param('cbas_dataset_name_invalid', self.cbas_dataset_name)
        self.skip_drop_connection = self.input.param('skip_drop_connection',False)
        self.skip_drop_dataset = self.input.param('skip_drop_dataset', False)
        self.query_id = self.input.param('query_id',None)
        self.mode = self.input.param('mode',None)
        self.num_concurrent_queries = self.input.param('num_queries', 5000)
        self.concurrent_batch_size = self.input.param('concurrent_batch_size', 100)
        self.compiler_param = self.input.param('compiler_param', None)
        self.compiler_param_val = self.input.param('compiler_param_val', None)
        self.expect_reject = self.input.param('expect_reject', False)
        self.expect_failure = self.input.param('expect_failure', False)
        self.index_name = self.input.param('index_name', None)
        self.index_fields = self.input.param('index_fields', None)
        if self.index_fields:
            self.index_fields = self.index_fields.split("-")
        self.otpNodes = []

        self.rest = RestConnection(self.master)
        
        self.log.info("Setting the min possible memory quota so that adding mode nodes to the cluster wouldn't be a problem.")
        self.rest.set_service_memoryQuota(service='memoryQuota', memoryQuota=MIN_KV_QUOTA)
        self.rest.set_service_memoryQuota(service='ftsMemoryQuota', memoryQuota=FTS_QUOTA)
        self.rest.set_service_memoryQuota(service='indexMemoryQuota', memoryQuota=INDEX_QUOTA)
        self.rest.set_service_memoryQuota(service='cbasMemoryQuota', memoryQuota=CBAS_QUOTA)
        
        # Drop any existing buckets and datasets
        if self.cbas_node:
            self.cleanup_cbas()
                    
        if not self.cbas_node and len(self.cbas_servers)>=1:
            self.cbas_node = self.cbas_servers[0]
            if "cbas" in self.master.services:
                self.cleanup_cbas()
            if add_default_cbas_node:
                if self.master.ip != self.cbas_node.ip:
                    self.otpNodes.append(self.add_node(self.cbas_node))
                else:
                    self.otpNodes = self.rest.node_statuses()
                ''' This CBAS cleanup is not strictly needed:
                    when a node is added to the cluster, it is automatically cleaned up.'''
                self.cleanup_cbas()
                self.cbas_servers.remove(self.cbas_node)
        
        self.log.info("==============  CBAS_BASE setup was finished for test #{0} {1} ==============" \
                          .format(self.case_number, self._testMethodName))