Example #1
 def setUp(self):
     super(N1qlBase, self).setUp()
     self.scan_consistency = self.input.param("scan_consistency",
                                              'REQUEST_PLUS')
     self.path = testconstants.LINUX_COUCHBASE_BIN_PATH
     self.use_rest = self.input.param("use_rest", True)
     self.hint_index = self.input.param("hint", None)
     self.n1ql_port = self.input.param("n1ql_port", 8093)
     self.analytics = self.input.param("analytics", False)
     self.named_prepare = self.input.param("named_prepare", None)
     self.version = self.input.param("cbq_version", "sherlock")
     self.isprepared = False
     self.ipv6 = self.input.param("ipv6", False)
     self.trans = self.input.param("n1ql_txn", True)
     self.commit = self.input.param("commit", True)
     self.rollback_to_savepoint = self.input.param("rollback_to_savepoint", False)
     self.skip_index = self.input.param("skip_index", False)
     self.primary_indx_type = self.input.param("primary_indx_type", 'GSI')
     self.index_type = self.input.param("index_type", 'GSI')
     self.skip_primary_index = self.input.param("skip_primary_index", False)
     self.flat_json = self.input.param("flat_json", False)
     self.dataset = self.input.param("dataset", "sabre")
     self.array_indexing = self.input.param("array_indexing", False)
     self.max_verify = self.input.param("max_verify", None)
     self.num_stmt_txn = self.input.param("num_stmt_txn", 5)
     self.num_collection = self.input.param("num_collection", 1)
     self.num_savepoints = self.input.param("num_savepoints", 0)
     self.override_savepoint = self.input.param("override_savepoint", 0)
     self.num_buckets = self.input.param("num_buckets", 1)
     self.prepare = self.input.param("prepare_stmt", False)
     self.num_txn = self.input.param("num_txn", 3)
     self.clause = WhereClause()
     self.buckets = self.bucket_util.get_all_buckets(self.cluster)
     self.collection_map = {}
     self.txtimeout = self.input.param("txntimeout", 0)
     self.atrcollection = self.input.param("atrcollection", False)
     self.num_commit = self.input.param("num_commit", 3)
     self.num_rollback_to_savepoint = \
         self.input.param("num_rollback_to_savepoint", 0)
     self.num_conflict = self.input.param("num_conflict", 0)
     self.write_conflict = self.input.param("write_conflict", False)
     self.Kvtimeout = self.input.param("Kvtimeout", None)
     self.memory_quota = self.input.param("memory_quota", 1)
     self.n1ql_server = self.cluster_util.get_nodes_from_services_map(
         cluster=self.cluster,
         service_type=CbServer.Services.N1QL,
         get_all_nodes=True)
     self.n1ql_helper = N1QLHelper(server=self.n1ql_server,
                                   use_rest=True,
                                   buckets=self.buckets,
                                   log=self.log,
                                   scan_consistency='REQUEST_PLUS',
                                   num_collection=self.num_collection,
                                   num_buckets=self.num_buckets,
                                   num_savepoints=self.num_savepoints,
                                   override_savepoint=self.override_savepoint,
                                   num_stmt=self.num_stmt_txn,
                                   load_spec=self.data_spec_name)
     self.num_insert, self.num_update, self.num_delete, self.num_merge = \
         self.n1ql_helper.get_random_number_stmt(self.num_stmt_txn)
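Not part of the original listing: a minimal sketch of how a test built on this setUp might exercise the helper, assuming the run_cbq_query(query=..., server=...) method shown in later examples and that the response is the raw N1QL JSON (a dict with a "results" list):

 def test_basic_scan(self):
     # Hypothetical smoke test: run a simple COUNT(*) through the helper
     # configured in setUp() against the first query node.
     query = "SELECT COUNT(*) AS cnt FROM `%s`" % self.buckets[0].name
     result = self.n1ql_helper.run_cbq_query(query=query,
                                             server=self.n1ql_server[0])
     self.assertTrue(result["results"][0]["cnt"] >= 0,
                     "Unexpected COUNT(*) response: %s" % result)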
Example #2
 def setUp(self):
     self.array_indexing = False
     super(UpgradeN1QLRBAC, self).setUp()
     self.initial_version = self.input.param('initial_version',
                                             '4.6.0-3653')
     self.upgrade_to = self.input.param("upgrade_to")
     self.n1ql_helper = N1QLHelper(version=self.version,
                                   shell=self.shell,
                                   use_rest=self.use_rest,
                                   max_verify=self.max_verify,
                                   buckets=self.buckets,
                                   item_flag=self.item_flag,
                                   n1ql_port=self.n1ql_port,
                                   full_docs_list=[],
                                   log=self.log,
                                   input=self.input,
                                   master=self.master)
     self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
     log.info(self.n1ql_node)
     if self.ddocs_num:
         self.create_ddocs_and_views()
         gen_load = BlobGenerator('pre-upgrade',
                                  'preupgrade-',
                                  self.value_size,
                                  end=self.num_items)
         self._load_all_buckets(self.master,
                                gen_load,
                                "create",
                                self.expire_time,
                                flag=self.item_flag)
     self.query_standard_bucket = self.query_buckets[1]
Example #3
 def setUp(self):
     super(CollectionsDropRecreateRebalance, self).setUp()
     self.known_nodes = self.cluster.servers[:self.nodes_init]
     self.nodes_failover = self.input.param("nodes_failover", 1)
     self.nodes_swap = self.input.param("nodes_swap", 0)
     self.recovery_type = self.input.param("recovery_type", "delta")
     self.rebalance_moves_per_node = self.input.param(
         "rebalance_moves_per_node", 2)
     self.cluster_util.set_rebalance_moves_per_nodes(
         rebalanceMovesPerNode=self.rebalance_moves_per_node)
     self.data_load_flag = False  # When to start/stop drop/recreate
     self.data_loading_thread = None
     self.data_load_exception = None  # Holds any exception raised by the data-load thread
     self.N1qltxn = self.input.param("N1ql_txn", False)
     if self.N1qltxn:
         self.n1ql_server = self.cluster_util.get_nodes_from_services_map(
             service_type="n1ql", get_all_nodes=True)
         self.n1ql_helper = N1QLHelper(server=self.n1ql_server,
                                       use_rest=True,
                                       buckets=self.bucket_util.buckets,
                                       log=self.log,
                                       scan_consistency='REQUEST_PLUS',
                                       num_collection=3,
                                       num_buckets=1,
                                       num_savepoints=1,
                                       override_savepoint=False,
                                       num_stmt=10,
                                       load_spec=self.data_spec_name)
         self.bucket_col = self.n1ql_helper.get_collections()
         self.stmts = self.n1ql_helper.get_stmt(self.bucket_col)
         self.stmts = self.n1ql_helper.create_full_stmts(self.stmts)
Example #4
 def _create_primary_index(self):
     if self.n1ql_server:
         if self.doc_ops:
             self.ops_dist_map = self.calculate_data_change_distribution(
                 create_per=self.create_ops_per,
                 update_per=self.update_ops_per,
                 delete_per=self.delete_ops_per,
                 expiry_per=self.expiry_ops_per,
                 start=0,
                 end=self.docs_per_day)
             log.info(self.ops_dist_map)
             self.docs_gen_map = self.generate_ops_docs(
                 self.docs_per_day, 0)
             self.full_docs_list_after_ops = self.generate_full_docs_list_after_ops(
                 self.docs_gen_map)
         # Define the helper used to run N1QL queries and to create/drop indexes
         self.n1ql_helper = N1QLHelper(shell=self.shell,
                                       max_verify=self.max_verify,
                                       buckets=self.buckets,
                                       item_flag=self.item_flag,
                                       n1ql_port=self.n1ql_port,
                                       full_docs_list=self.full_docs_list,
                                       log=self.log,
                                       input=self.input,
                                       master=self.master,
                                       use_rest=True)
         log.info(self.n1ql_server)
         if self.create_primary_index:
             self.n1ql_helper.create_primary_index(
                 using_gsi=self.use_gsi_for_primary,
                 server=self.n1ql_server)
Example #5
 def setUp(self):
     if self._testMethodDoc:
         log.info("\n\nStarting Test: %s \n%s" % (self._testMethodName, self._testMethodDoc))
     else:
         log.info("\n\nStarting Test: %s" % (self._testMethodName))
     self.input = TestInputSingleton.input
     self.input.test_params.update({"default_bucket": False})
     super(EventingBaseTest, self).setUp()
     self.master = self.servers[0]
     self.server = self.master
     self.restServer = self.get_nodes_from_services_map(service_type="eventing")
     self.rest = RestConnection(self.restServer)
     self.rest.set_indexer_storage_mode()
     self.log.info(
         "Setting the min possible memory quota so that adding mode nodes to the cluster wouldn't be a problem.")
     self.rest.set_service_memoryQuota(service='memoryQuota', memoryQuota=330)
     self.rest.set_service_memoryQuota(service='indexMemoryQuota', memoryQuota=INDEX_QUOTA)
     # self.rest.set_service_memoryQuota(service='eventingMemoryQuota', memoryQuota=EVENTING_QUOTA)
     self.src_bucket_name = self.input.param('src_bucket_name', 'src_bucket')
     self.eventing_log_level = self.input.param('eventing_log_level', 'INFO')
     self.dst_bucket_name = self.input.param('dst_bucket_name', 'dst_bucket')
     self.dst_bucket_name1 = self.input.param('dst_bucket_name1', 'dst_bucket1')
     self.metadata_bucket_name = self.input.param('metadata_bucket_name', 'metadata')
     self.create_functions_buckets = self.input.param('create_functions_buckets', True)
     self.docs_per_day = self.input.param("doc-per-day", 1)
     self.use_memory_manager = self.input.param('use_memory_manager', True)
     self.print_eventing_handler_code_in_logs = self.input.param('print_eventing_handler_code_in_logs', True)
     random.seed(datetime.time)
     function_name = "Function_{0}_{1}".format(random.randint(1, 1000000000), self._testMethodName)
     # See MB-28447: function names can now be at most 100 characters
     self.function_name = function_name[0:90]
     self.timer_storage_chan_size = self.input.param('timer_storage_chan_size', 10000)
     self.dcp_gen_chan_size = self.input.param('dcp_gen_chan_size', 10000)
     self.is_sbm = self.input.param('source_bucket_mutation', False)
     self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
     self.n1ql_helper = N1QLHelper(shell=self.shell, max_verify=self.max_verify, buckets=self.buckets,
                                   item_flag=self.item_flag, n1ql_port=self.n1ql_port,
                                   full_docs_list=self.full_docs_list, log=self.log, input=self.input,
                                   master=self.master, use_rest=True)
     self.pause_resume = self.input.param('pause_resume', False)
     self.pause_resume_number = self.input.param('pause_resume_number', 1)
     self.is_curl = self.input.param('curl', False)
     self.hostname = self.input.param('host', 'https://postman-echo.com/')
     self.curl_username = self.input.param('curl_user', None)
     self.curl_password = self.input.param('curl_password', None)
     self.auth_type = self.input.param('auth_type', 'no-auth')
     self.bearer_key = self.input.param('bearer_key', None)
     self.url = self.input.param('path', None)
     self.cookies = self.input.param('cookies', False)
     self.bearer_key = self.input.param('bearer_key', '')
     if self.hostname == 'local':
         self.insall_dependencies()
         s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
         s.connect(("8.8.8.8", 80))
         ip = s.getsockname()[0]
         s.close()
         self.hostname= "http://"+ip+":1080/"
         self.log.info("local ip address:{}".format(self.hostname))
         self.setup_curl()
Example #6
    def test_gsi_with_crud_with_redaction_enabled(self):
        # load bucket and do some ops
        self.set_indexer_logLevel("trace")
        json_generator = JsonGenerator()
        gen_docs = json_generator.generate_all_type_documents_for_gsi(
            docs_per_day=self.doc_per_day, start=0)
        full_docs_list = self.generate_full_docs_list(gen_docs)
        n1ql_helper = N1QLHelper(use_rest=True,
                                 buckets=self.buckets,
                                 full_docs_list=full_docs_list,
                                 log=log,
                                 input=self.input,
                                 master=self.master)
        self.load(gen_docs)
        n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
        query_definition_generator = SQLDefinitionGenerator()
        query_definitions = query_definition_generator.generate_airlines_data_query_definitions(
        )
        query_definitions = query_definition_generator.filter_by_group(
            "all", query_definitions)
        # Set the log redaction level, collect logs, then verify the log files exist and are redacted
        self.set_redaction_level()
        self.start_logs_collection()
        # Create partial Index
        for query_definition in query_definitions:
            for bucket in self.buckets:
                create_query = query_definition.generate_index_create_query(
                    bucket.name)
                n1ql_helper.run_cbq_query(query=create_query, server=n1ql_node)

        for query_definition in query_definitions:
            for bucket in self.buckets:
                scan_query = query_definition.generate_query(
                    bucket=bucket.name)
                n1ql_helper.run_cbq_query(query=scan_query, server=n1ql_node)

        for query_definition in query_definitions:
            for bucket in self.buckets:
                drop_query = query_definition.generate_index_drop_query(
                    bucket=bucket.name)
                n1ql_helper.run_cbq_query(query=drop_query, server=n1ql_node)
        result = self.monitor_logs_collection()
        log.info(result)
        try:
            logs_path = result["perNode"]["ns_1@" +
                                          str(self.master.ip)]["path"]
        except KeyError:
            logs_path = result["perNode"]["[email protected]"]["path"]
        redactFileName = logs_path.split('/')[-1]
        nonredactFileName = logs_path.split('/')[-1].replace('-redacted', '')
        remotepath = logs_path[0:logs_path.rfind('/') + 1]
        log_file = self.input.param("log_file_name", "indexer.log")
        self.verify_log_files_exist(remotepath=remotepath,
                                    redactFileName=redactFileName,
                                    nonredactFileName=nonredactFileName)
        self.verify_log_redaction(remotepath=remotepath,
                                  redactFileName=redactFileName,
                                  nonredactFileName=nonredactFileName,
                                  logFileName="ns_server.{0}".format(log_file))
Example #7
 def setUp(self):
     super(AggregatePushdownClass, self).setUp()
     self.n1ql_helper = N1QLHelper(master=self.master)
     self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
     self.aggr_distinct = self.input.param("aggr_distinct", False)
     self.order_by = self.input.param("order_by", False)
     self.having = self.input.param("having", False)
     self.offset_limit = self.input.param("offset_limit", False)
Example #8
 def _create_primary_index(self):
     n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
     n1ql_helper = N1QLHelper(shell=self.shell, max_verify=self.max_verify, buckets=self.buckets,
                                   item_flag=self.item_flag, n1ql_port=self.n1ql_port,
                                   full_docs_list=self.full_docs_list, log=self.log, input=self.input,
                                   master=self.master, use_rest=True)
     # A primary index is required because the handler code runs N1QL queries
     n1ql_helper.create_primary_index(using_gsi=True, server=n1ql_node)
Example #9
 def setUp(self):
     super(EventingMultiHandler, self).setUp()
     self.num_src_buckets = self.input.param('num_src_buckets', 1)
     self.num_dst_buckets = self.input.param('num_dst_buckets', 1)
     self.num_handlers = self.input.param('num_handlers', 1)
     self.deploy_handler = self.input.param('deploy_handler', 1)
     self.sequential = self.input.param('sequential', True)
     self.num_pause = self.input.param('num_pause', 0)
     self.worker_count = self.input.param('worker_count', 1)
     self.handler_code = self.input.param('handler_code',
                                          'handler_code/ABO/insert.js')
     self.gens_load = self.generate_docs(self.docs_per_day)
     quota = (self.num_src_buckets + self.num_dst_buckets) * 100 + 300
     self.rest.set_service_memoryQuota(service='memoryQuota',
                                       memoryQuota=quota)
     self.metadata_bucket_size = 300
     bucket_params_meta = self._create_bucket_params(
         server=self.server,
         size=self.metadata_bucket_size,
         replicas=self.num_replicas)
     self.create_n_buckets(self.src_bucket_name, self.num_src_buckets)
     self.buckets = RestConnection(self.master).get_buckets()
     if self.num_dst_buckets > 0:
         self.create_n_buckets(self.dst_bucket_name, self.num_dst_buckets)
     self.cluster.create_standard_bucket(name=self.metadata_bucket_name,
                                         port=STANDARD_BUCKET_PORT + 1,
                                         bucket_params=bucket_params_meta)
     self.create_scope_collection(bucket=self.metadata_bucket_name,
                                  scope=self.metadata_bucket_name,
                                  collection=self.metadata_bucket_name)
     self.deploying = []
     self.pausing = []
     self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
     self.n1ql_helper = N1QLHelper(shell=self.shell,
                                   max_verify=self.max_verify,
                                   buckets=self.buckets,
                                   item_flag=self.item_flag,
                                   n1ql_port=self.n1ql_port,
                                   full_docs_list=self.full_docs_list,
                                   log=self.log,
                                   input=self.input,
                                   master=self.master,
                                   use_rest=True)
     self.n1ql_helper.create_primary_index(using_gsi=True,
                                           server=self.n1ql_node)
     self.binding_map = {}
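A hypothetical follow-up (not in the source) mirroring the create_primary_index() call above; it assumes the run_cbq_query(query=..., server=...) signature used in the other examples:

 def _drop_primary_index(self, bucket_name):
     # Hypothetical cleanup: drop the GSI primary index created in setUp().
     query = "DROP PRIMARY INDEX ON `%s` USING GSI" % bucket_name
     self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_node)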
Example #10
 def test_n1ql_max_connection(self):
     self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
     self.n1ql_helper = N1QLHelper(shell=self.shell,
                                   max_verify=self.max_verify,
                                   buckets=self.buckets,
                                   item_flag=self.item_flag,
                                   n1ql_port=self.n1ql_port,
                                   full_docs_list=self.full_docs_list,
                                   log=self.log,
                                   input=self.input,
                                   master=self.master,
                                   use_rest=True)
     # A primary index is required because the handler code runs N1QL queries
     self.n1ql_helper.create_primary_index(using_gsi=True,
                                           server=self.n1ql_node)
     self.load_sample_buckets(self.server, "travel-sample")
     self.load(self.gens_load,
               buckets=self.src_bucket,
               flag=self.item_flag,
               verify_data=False,
               batch_size=self.batch_size)
     body = self.create_save_function_body(
         self.function_name,
         'handler_code/n1ql_op_connection.js',
         worker_count=1)
     self.deploy_function(body)
     self.verify_eventing_results(self.function_name,
                                  self.docs_per_day * 2016,
                                  skip_stats_validation=True)
     # delete all documents
     self.load(self.gens_load,
               buckets=self.src_bucket,
               flag=self.item_flag,
               verify_data=False,
               batch_size=self.batch_size,
               op_type='delete')
     # Wait for eventing to catch up with all the delete mutations and verify results
     self.verify_eventing_results(self.function_name,
                                  0,
                                  on_delete=True,
                                  skip_stats_validation=True)
     self.undeploy_and_delete_function(body)
Example #11
 def create_index(self, bucket, query_definition, deploy_node_info=None):
     defer_build = True
     query = query_definition.generate_index_create_query(
         bucket=bucket,
         use_gsi_for_secondary=self.use_gsi_for_secondary,
         deploy_node_info=deploy_node_info,
         defer_build=defer_build,
         num_replica=self.num_index_replicas)
     log.info(query)
     # Define the helper used to run N1QL queries and to create/drop indexes
     self.n1ql_helper = N1QLHelper(shell=self.shell,
                                   max_verify=self.max_verify,
                                   buckets=self.buckets,
                                   item_flag=self.item_flag,
                                   n1ql_port=self.n1ql_port,
                                   full_docs_list=self.full_docs_list,
                                   log=self.log,
                                   input=self.input,
                                   master=self.master,
                                   use_rest=True)
     create_index_task = self.cluster.async_create_index(
         server=self.n1ql_server,
         bucket=bucket,
         query=query,
         n1ql_helper=self.n1ql_helper,
         index_name=query_definition.index_name,
         defer_build=defer_build)
     create_index_task.result()
     query = self.n1ql_helper.gen_build_index_query(
         bucket=bucket, index_list=[query_definition.index_name])
     build_index_task = self.cluster.async_build_index(
         server=self.n1ql_server,
         bucket=bucket,
         query=query,
         n1ql_helper=self.n1ql_helper)
     build_index_task.result()
     check = self.n1ql_helper.is_index_ready_and_in_list(
         bucket, query_definition.index_name, server=self.n1ql_server)
     self.assertTrue(
         check, "index {0} failed to be created".format(
             query_definition.index_name))
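A hypothetical call site (not in the original listing) showing how the create_index() method above could be driven; self.buckets and self.query_definitions are assumed to be populated by the surrounding base class:

 def test_deferred_index_build(self):
     # Create one deferred index per bucket/definition pair; create_index()
     # above issues the BUILD INDEX statement itself and waits for readiness.
     for bucket in self.buckets:
         for query_definition in self.query_definitions:
             self.create_index(bucket.name, query_definition)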
Example #12
 def setUp(self):
     super(AggregatePushdownRecoveryClass, self).setUp()
     self.n1ql_helper = N1QLHelper(master=self.master)
     self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
     self.aggr_distinct = self.input.param("aggr_distinct", False)
     self.graceful = self.input.param("graceful", False)
Example #13
    def setUp(self):
        super(CrashTest, self).setUp()

        self.doc_ops = self.input.param("doc_ops", None)
        self.process_name = self.input.param("process", None)
        self.service_name = self.input.param("service", "data")
        self.sig_type = self.input.param("sig_type", "SIGKILL").upper()
        self.target_node = self.input.param("target_node", "active")
        self.client_type = self.input.param("client_type", "sdk").lower()
        self.N1qltxn = self.input.param("N1qltxn", False)

        self.pre_warmup_stats = dict()
        self.timeout = 120
        self.new_docs_to_add = 10000

        if self.doc_ops is not None:
            self.doc_ops = self.doc_ops.split(";")

        if not self.atomicity:
            self.durability_helper = DurabilityHelper(
                self.log, self.nodes_init,
                durability=self.durability_level,
                replicate_to=self.replicate_to,
                persist_to=self.persist_to)

        verification_dict = dict()
        verification_dict["ops_create"] = \
            self.cluster.buckets[0].scopes[
                CbServer.default_scope].collections[
                CbServer.default_collection].num_items
        verification_dict["sync_write_aborted_count"] = 0
        verification_dict["rollback_item_count"] = 0
        verification_dict["pending_writes"] = 0
        if self.durability_level:
            verification_dict["sync_write_committed_count"] = \
                verification_dict["ops_create"]

        # Load initial documents into the buckets
        transaction_gen_create = doc_generator(
            "transaction_key", 0, self.num_items,
            key_size=self.key_size,
            doc_size=self.doc_size,
            doc_type=self.doc_type,
            target_vbucket=self.target_vbucket,
            vbuckets=self.cluster_util.vbuckets)
        gen_create = doc_generator(
            self.key, 0, self.num_items,
            key_size=self.key_size,
            doc_size=self.doc_size,
            doc_type=self.doc_type,
            target_vbucket=self.target_vbucket,
            vbuckets=self.cluster_util.vbuckets)
        if self.atomicity:
            transaction_task = self.task.async_load_gen_docs_atomicity(
                self.cluster, self.cluster.buckets,
                transaction_gen_create, DocLoading.Bucket.DocOps.CREATE,
                exp=0,
                batch_size=10,
                process_concurrency=self.process_concurrency,
                replicate_to=self.replicate_to,
                persist_to=self.persist_to,
                durability=self.durability_level,
                timeout_secs=self.sdk_timeout,
                update_count=self.update_count,
                transaction_timeout=self.transaction_timeout,
                commit=True,
                sync=self.sync)
            self.task.jython_task_manager.get_task_result(transaction_task)
        for bucket in self.cluster.buckets:
            task = self.task.async_load_gen_docs(
                self.cluster, bucket, gen_create,
                DocLoading.Bucket.DocOps.CREATE, self.maxttl,
                persist_to=self.persist_to,
                replicate_to=self.replicate_to,
                durability=self.durability_level,
                batch_size=10, process_concurrency=8)
            self.task.jython_task_manager.get_task_result(task)
            self.bucket_util._wait_for_stats_all_buckets(self.cluster.buckets)

            self.cluster.buckets[0].scopes[
                CbServer.default_scope].collections[
                CbServer.default_collection].num_items += self.num_items
            verification_dict["ops_create"] += self.num_items
            if self.durability_level:
                verification_dict["sync_write_committed_count"] += \
                    self.num_items
            # Verify cbstats vbucket-details
            stats_failed = self.durability_helper.verify_vbucket_details_stats(
                bucket, self.cluster_util.get_kv_nodes(),
                vbuckets=self.cluster_util.vbuckets,
                expected_val=verification_dict)

            if self.atomicity is False:
                if stats_failed:
                    self.fail("Cbstats verification failed")
                self.bucket_util.verify_stats_all_buckets(
                    self.cluster,
                    self.cluster.buckets[0].scopes[
                        CbServer.default_scope].collections[
                        CbServer.default_collection].num_items)
        self.bucket = self.cluster.buckets[0]
        if self.N1qltxn:
            self.n1ql_server = self.cluster_util.get_nodes_from_services_map(
                                service_type="n1ql",
                                get_all_nodes=True)
            self.n1ql_helper = N1QLHelper(server=self.n1ql_server,
                                          use_rest=True,
                                          buckets=self.cluster.buckets,
                                          log=self.log,
                                          scan_consistency='REQUEST_PLUS',
                                          num_collection=3,
                                          num_buckets=1,
                                          num_savepoints=1,
                                          override_savepoint=False,
                                          num_stmt=10,
                                          load_spec=self.data_spec_name)
            self.bucket_col = self.n1ql_helper.get_collections()
            self.stmts = self.n1ql_helper.get_stmt(self.bucket_col)
            self.stmts = self.n1ql_helper.create_full_stmts(self.stmts)
        self.log.info("==========Finished CrashTest setup========")
Example #14
 def setUp(self):
     super(AggregatePushdownClass, self).setUp()
     self.n1ql_helper = N1QLHelper(master=self.master)
     self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
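For reference only, a minimal sketch of how this slim helper instance could be used, assuming the run_cbq_query(query=..., server=...) signature from the other examples, the raw N1QL JSON response format, and a bucket named default:

 def test_simple_group_by(self):
     # Placeholder bucket name; adjust to the buckets created by the base class.
     query = "SELECT type, COUNT(*) AS cnt FROM `default` " \
             "GROUP BY type ORDER BY cnt DESC"
     result = self.n1ql_helper.run_cbq_query(query=query,
                                             server=self.n1ql_node)
     self.log.info("Aggregate rows: %s" % result["results"])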
Example #15
    def setUp(self):
        super(QueryTests, self).setUp()
        self.expiry = self.input.param("expiry", 0)
        self.batch_size = self.input.param("batch_size", 1)
        self.scan_consistency = self.input.param("scan_consistency",
                                                 "request_plus")
        self.skip_cleanup = self.input.param("skip_cleanup", False)
        self.run_async = self.input.param("run_async", True)
        self.version = self.input.param("cbq_version", "git_repo")
        for server in self.servers:
            rest = RestConnection(server)
            temp = rest.cluster_status()
            self.log.info("Initial status of {0} cluster is {1}".format(
                server.ip, temp['nodes'][0]['status']))
            while (temp['nodes'][0]['status'] == 'warmup'):
                self.log.info("Waiting for cluster to become healthy")
                self.sleep(5)
                temp = rest.cluster_status()
            self.log.info("current status of {0}  is {1}".format(
                server.ip, temp['nodes'][0]['status']))

        indexer_node = self.get_nodes_from_services_map(service_type="index",
                                                        get_all_nodes=True)
        # Set indexer storage mode
        indexer_rest = RestConnection(indexer_node[0])
        doc = {"indexer.settings.storage_mode": self.gsi_type}
        indexer_rest.set_index_settings_internal(doc)
        doc = {"indexer.api.enableTestServer": True}
        indexer_rest.set_index_settings_internal(doc)
        self.indexer_scanTimeout = self.input.param("indexer_scanTimeout",
                                                    None)
        if self.indexer_scanTimeout is not None:
            for server in indexer_node:
                rest = RestConnection(server)
                rest.set_index_settings({
                    "indexer.settings.scan_timeout":
                    self.indexer_scanTimeout
                })
        if self.input.tuq_client and "client" in self.input.tuq_client:
            self.shell = RemoteMachineShellConnection(
                self.input.tuq_client["client"])
        else:
            self.shell = RemoteMachineShellConnection(self.master)
        self.use_gsi_for_primary = self.input.param("use_gsi_for_primary",
                                                    True)
        self.use_gsi_for_secondary = self.input.param("use_gsi_for_secondary",
                                                      True)
        self.create_primary_index = self.input.param("create_primary_index",
                                                     True)
        self.use_rest = self.input.param("use_rest", True)
        self.max_verify = self.input.param("max_verify", None)
        self.buckets = RestConnection(self.master).get_buckets()
        self.docs_per_day = self.input.param("doc-per-day", 49)
        self.item_flag = self.input.param("item_flag", 4042322160)
        self.n1ql_port = self.input.param("n1ql_port", 8093)
        self.dataset = self.input.param("dataset", "default")
        self.value_size = self.input.param("value_size", 1024)
        self.doc_ops = self.input.param("doc_ops", False)
        self.create_ops_per = self.input.param("create_ops_per", 0)
        self.expiry_ops_per = self.input.param("expiry_ops_per", 0)
        self.delete_ops_per = self.input.param("delete_ops_per", 0)
        self.update_ops_per = self.input.param("update_ops_per", 0)
        self.gens_load = self.generate_docs(self.docs_per_day)
        if self.input.param("gomaxprocs", None):
            self.n1ql_helper.configure_gomaxprocs()
        self.full_docs_list = self.generate_full_docs_list(self.gens_load)
        self.gen_results = TuqGenerators(self.log, self.full_docs_list)
        verify_data = False
        if self.scan_consistency != "request_plus":
            verify_data = True
        self.load(self.gens_load,
                  flag=self.item_flag,
                  verify_data=verify_data,
                  batch_size=self.batch_size)
        if self.doc_ops:
            self.ops_dist_map = self.calculate_data_change_distribution(
                create_per=self.create_ops_per,
                update_per=self.update_ops_per,
                delete_per=self.delete_ops_per,
                expiry_per=self.expiry_ops_per,
                start=0,
                end=self.docs_per_day)
            self.log.info(self.ops_dist_map)
            self.docs_gen_map = self.generate_ops_docs(self.docs_per_day, 0)
            self.full_docs_list_after_ops = self.generate_full_docs_list_after_ops(
                self.docs_gen_map)
        # Define the helper used to run N1QL queries and to create/drop indexes
        self.n1ql_helper = N1QLHelper(version=self.version,
                                      shell=self.shell,
                                      use_rest=self.use_rest,
                                      max_verify=self.max_verify,
                                      buckets=self.buckets,
                                      item_flag=self.item_flag,
                                      n1ql_port=self.n1ql_port,
                                      full_docs_list=self.full_docs_list,
                                      log=self.log,
                                      input=self.input,
                                      master=self.master)
        self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
        self.log.info(self.n1ql_node)
        #self.n1ql_helper._start_command_line_query(self.n1ql_node)
        # sleep to avoid race condition during bootstrap
        if self.create_primary_index:
            try:
                self.n1ql_helper.create_primary_index(
                    using_gsi=self.use_gsi_for_primary, server=self.n1ql_node)
            except Exception as ex:
                self.log.info(ex)
                raise ex
Example #16
 def __init__(self, node):
     self.log = logger.Logger.get_logger()
     self.node = node
     self.use_rest = True
     self.n1ql_helper = N1QLHelper(use_rest=True, log=self.log)
     self.cli_helper = CollectionsCLI(node)
Example #17
 def setUp(self):
     super(AggregatePushdownClass, self).setUp()
     self.n1ql_helper = N1QLHelper(master=self.master)
     self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
     self.array_type = self.input.param("array_type", "all")
     self.aggr_distinct = self.input.param("aggr_distinct", False)
Example #18
 def setUp(self):
     if self._testMethodDoc:
         log.info("\n\nStarting Test: %s \n%s" %
                  (self._testMethodName, self._testMethodDoc))
     else:
         log.info("\n\nStarting Test: %s" % (self._testMethodName))
     self.input = TestInputSingleton.input
     self.input.test_params.update({"default_bucket": False})
     super(EventingBaseTest, self).setUp()
     self.master = self.servers[0]
     self.server = self.master
     self.restServer = self.get_nodes_from_services_map(
         service_type="eventing")
     self.rest = RestConnection(self.restServer)
     self.rest.set_indexer_storage_mode()
     self.log.info(
         "Setting the min possible memory quota so that adding mode nodes to the cluster wouldn't be a problem."
     )
     self.rest.set_service_memoryQuota(service='memoryQuota',
                                       memoryQuota=330)
     self.rest.set_service_memoryQuota(service='indexMemoryQuota',
                                       memoryQuota=INDEX_QUOTA)
     # self.rest.set_service_memoryQuota(service='eventingMemoryQuota', memoryQuota=EVENTING_QUOTA)
     self.src_bucket_name = self.input.param('src_bucket_name',
                                             'src_bucket')
     self.eventing_log_level = self.input.param('eventing_log_level',
                                                'INFO')
     self.dst_bucket_name = self.input.param('dst_bucket_name',
                                             'dst_bucket')
     self.dst_bucket_name1 = self.input.param('dst_bucket_name1',
                                              'dst_bucket1')
     self.metadata_bucket_name = self.input.param('metadata_bucket_name',
                                                  'metadata')
     self.create_functions_buckets = self.input.param(
         'create_functions_buckets', True)
     self.docs_per_day = self.input.param("doc-per-day", 1)
     self.use_memory_manager = self.input.param('use_memory_manager', True)
     self.print_eventing_handler_code_in_logs = self.input.param(
         'print_eventing_handler_code_in_logs', True)
     random.seed(datetime.time)
     function_name = "Function_{0}_{1}".format(
         random.randint(1, 1000000000), self._testMethodName)
     # See MB-28447: function names can now be at most 100 characters
     self.function_name = function_name[0:90]
     self.timer_storage_chan_size = self.input.param(
         'timer_storage_chan_size', 10000)
     self.dcp_gen_chan_size = self.input.param('dcp_gen_chan_size', 10000)
     self.is_sbm = self.input.param('source_bucket_mutation', False)
     self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
     self.n1ql_helper = N1QLHelper(shell=self.shell,
                                   max_verify=self.max_verify,
                                   buckets=self.buckets,
                                   item_flag=self.item_flag,
                                   n1ql_port=self.n1ql_port,
                                   full_docs_list=self.full_docs_list,
                                   log=self.log,
                                   input=self.input,
                                   master=self.master,
                                   use_rest=True)
     self.pause_resume = self.input.param('pause_resume', False)
     self.pause_resume_number = self.input.param('pause_resume_number', 1)
Example #19
    def load_gsi_fts_indexes(self):
        def create_gsi_index(b_name, t_index):
            query = gsi_create_template % (b_name.replace(
                ".", ""), t_index, b_name, self.index_replica)
            self.log.debug("Executing query: %s" % query)
            try:
                n1ql_helper.run_cbq_query(query)
            except Exception as e:
                self.log.critical(e)

        def drop_gsi_index(b_name, t_index):
            query = gsi_drop_template % (b_name, b_name.replace(".",
                                                                ""), t_index)
            self.log.debug("Executing query: %s" % query)
            try:
                n1ql_helper.run_cbq_query(query)
            except Exception as e:
                self.log.critical(e)

        def create_fts_index(b_name, t_index):
            fts_index_name = "%s_fts_%d" % (b_name.replace(".", ""), t_index)
            status, content = fts_helper.create_fts_index_from_json(
                fts_index_name, fts_param_template %
                (fts_index_name, b_name, self.fts_index_partitions))
            if status is False:
                self.fail("Failed to create fts index %s: %s" %
                          (fts_index_name, content))

        def drop_fts_index(b_name, t_index):
            fts_index_name = "%s_fts_%d" % (b_name.replace(".", ""), t_index)
            status, content = fts_helper.delete_fts_index(fts_index_name)
            if status is False:
                self.fail("Failed to drop fts index %s: %s" %
                          (fts_index_name, content))

        n1ql_node = self.cluster.query_nodes[0]
        fts_helper = FtsHelper(self.cluster.fts_nodes[0])
        n1ql_helper = N1QLHelper(server=n1ql_node, use_rest=True, log=self.log)

        gsi_index_name_pattern = "%s_primary_%d"
        gsi_create_template = "CREATE PRIMARY INDEX `" \
                              + gsi_index_name_pattern \
                              + "` on `%s` USING GSI " \
                              + "WITH {\"num_replica\": %s}"
        gsi_drop_template = "DROP INDEX `%s`.`" + gsi_index_name_pattern \
                            + "` USING GSI"
        fts_param_template = '{ \
            "type": "fulltext-index", \
            "name": "%s", \
            "sourceType": "couchbase", \
            "sourceName": "%s", \
            "planParams": { \
              "maxPartitionsPerPIndex": 171, \
              "indexPartitions": %d \
            }, \
            "params": { \
              "doc_config": { \
                "docid_prefix_delim": "", \
                "docid_regexp": "", \
                "mode": "type_field", \
                "type_field": "type" \
              }, \
              "mapping": { \
                "analysis": {}, \
                "default_analyzer": "standard", \
                "default_datetime_parser": "dateTimeOptional", \
                "default_field": "_all", \
                "default_mapping": { \
                  "dynamic": false, \
                  "enabled": true, \
                  "properties": { \
                    "rsx": { \
                      "dynamic": false, \
                      "enabled": true, \
                      "fields": [ \
                        { \
                          "index": true, \
                          "name": "rsx", \
                          "type": "text" \
                        } \
                      ] \
                    } \
                  } \
                }, \
                "default_type": "_default", \
                "docvalues_dynamic": true, \
                "index_dynamic": true, \
                "store_dynamic": false, \
                "type_field": "_type" \
              }, \
              "store": { \
                "indexType": "scorch" \
              } \
            }, \
            "sourceParams": {} \
        }'

        # Open an SDK connection for running N1QL queries
        client = SDKClient([self.cluster.master], self.cluster.buckets[0])

        for bucket in self.cluster.buckets[:4]:
            self.log.info("Creating GSI indexes %d::%d for %s" %
                          (0, self.num_gsi_index, bucket.name))
            for index in range(0, self.num_gsi_index):
                create_gsi_index(bucket.name, index)
            self.log.info("Done creating GSI indexes for %s" % bucket.name)

        for bucket in self.cluster.buckets[:3]:
            self.log.info("Creating FTS indexes %d::%d for %s" %
                          (0, self.num_fts_index, bucket.name))
            for index in range(0, self.num_fts_index):
                create_fts_index(bucket.name, index)
            self.log.info("Done creating FTS indexes for %s" % bucket.name)

        for bucket in self.cluster.buckets[:4]:
            self.log.info("Create and drop %s GSI indexes on %s" %
                          (self.gsi_indexes_to_create_drop, bucket.name))
            for index in range(
                    self.num_gsi_index,
                    self.num_gsi_index + self.gsi_indexes_to_create_drop):
                create_gsi_index(bucket.name, index)
                drop_gsi_index(bucket.name, index)

        for bucket in self.cluster.buckets[:3]:
            self.log.info("Create and drop %s FTS indexes on %s" %
                          (self.fts_indexes_to_create_drop, bucket.name))
            for index in range(
                    self.num_fts_index,
                    self.num_fts_index + self.fts_indexes_to_create_drop):
                create_fts_index(bucket.name, index)
                drop_fts_index(bucket.name, index)

        # Close the SDK connection
        client.close()
Example #20
    def setUp(self):
        super(ConfigPurging, self).setUp()
        is_windows = False

        for node in self.cluster.servers:
            shell = RemoteMachineShellConnection(node)
            if shell.info.type.lower() == OS.WINDOWS:
                is_windows = True
            shell.enable_diag_eval_on_non_local_hosts()
            shell.disconnect()

        self.cluster_util.update_cluster_nodes_service_list(self.cluster)

        # Default purger values
        self.default_run_interval = 60
        self.default_purge_age = 300

        self.time_stamp = time()
        self.num_index = self.input.param("num_index", 0)
        self.index_type = self.input.param("index_type", CbServer.Services.FTS)
        self.index_name_len = self.input.param("index_name_len", 10)
        self.fts_index_partition = self.input.param("fts_index_partition", 1)
        self.index_replicas = self.input.param("gsi_index_replicas", 1)
        self.fts_helper = FtsHelper(self.cluster.fts_nodes[0]) \
            if self.cluster.fts_nodes else None
        self.n1ql_helper = N1QLHelper(server=self.cluster.query_nodes[0],
                                      use_rest=True, log=self.log) \
            if self.cluster.query_nodes else None
        self.spare_node = self.servers[-1]
        self.couchbase_base_dir = "/opt/couchbase"
        if is_windows:
            self.couchbase_base_dir = \
                "/cygdrive/c/Program\\ Files/Couchbase/Server"

        # Param order:
        # fts_name, bucket_name, index_partitions, scope_name, collection_name
        self.fts_param_template = '{ \
          "type": "fulltext-index", \
          "name": "%s", \
          "sourceType": "gocbcore", \
          "sourceName": "%s", \
          "sourceUUID": "%s", \
          "planParams": { \
            "maxPartitionsPerPIndex": 1024, \
            "indexPartitions": %d \
          }, \
          "params": { \
            "doc_config": { \
              "docid_prefix_delim": "", \
              "docid_regexp": "", \
              "mode": "scope.collection.type_field", \
              "type_field": "type" \
            }, \
            "mapping": { \
              "analysis": {}, \
              "default_analyzer": "standard", \
              "default_datetime_parser": "dateTimeOptional", \
              "default_field": "_all", \
              "default_mapping": { \
                "dynamic": true, \
                "enabled": false \
              }, \
              "default_type": "_default", \
              "docvalues_dynamic": false, \
              "index_dynamic": true, \
              "store_dynamic": false, \
              "type_field": "_type", \
              "types": { \
                "%s.%s": { \
                  "dynamic": true, \
                  "enabled": true \
                } \
              } \
            }, \
            "store": { \
              "indexType": "scorch", \
              "segmentVersion": 15 \
            } \
          }, \
          "sourceParams": {} \
        }'

        self.gsi_index_name_template = "%s_%s_%s_%d"
        self.gsi_create_template = "CREATE PRIMARY INDEX `%s` " \
                                   "ON `%s`.`%s`.`%s` USING GSI " \
                                   "WITH {\"num_replica\": %d}"
        self.gsi_drop_template = "DROP INDEX `%s`.`%s` USING GSI"

        self.op_create = "key_create"
        self.op_remove = "key_delete"

        self.ts_during_start = self.__get_current_timestamps_from_debug_log()
        self.initial_tombstones = \
            self.cluster_util.get_metakv_dicts(self.cluster.master)
        self.log.info(self.ts_during_start)
Example #21
 def setUp(self):
     super(USINGFTS, self).setUp()
     self.n1ql_helper = N1QLHelper(use_rest=True, log=self.log)
     self.use_multiindex = TestInputSingleton.input.param("use_multiindex", False)
     self.using_fts_and_gsi = TestInputSingleton.input.param("using_fts_and_gsi", False)
     self.specific_index = TestInputSingleton.input.param("specific_index", False)
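A hedged illustration (not part of the example set) of the kind of flex-index statement such a test could issue through the helper; the bucket name default and index name default_fts are placeholders:

 def _run_fts_hinted_query(self, n1ql_node):
     # USE INDEX (... USING FTS) asks the planner to consider an FTS index
     # for the predicate; both names below are assumptions.
     query = "SELECT META().id FROM `default` " \
             "USE INDEX (`default_fts` USING FTS) " \
             "WHERE name LIKE 'A%'"
     return self.n1ql_helper.run_cbq_query(query=query, server=n1ql_node)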