Example #1
 def test_n1ql_iterators_with_break_and_continue(self):
     values = ['1', '10']
     # create 100 non json docs
     # number of docs is intentionally reduced as the handler code runs 1 n1ql query per mutation
     gen_load_non_json = JSONNonDocGenerator('non_json_docs', values, start=0, end=100)
     gen_load_non_json_del = copy.deepcopy(gen_load_non_json)
     self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
     self.n1ql_helper = N1QLHelper(shell=self.shell,
                                   max_verify=self.max_verify,
                                   buckets=self.buckets,
                                   item_flag=self.item_flag,
                                   n1ql_port=self.n1ql_port,
                                   full_docs_list=self.full_docs_list,
                                   log=self.log, input=self.input,
                                   master=self.master,
                                   use_rest=True
                                   )
     # primary index is required as we run some queries from handler code
     self.n1ql_helper.create_primary_index(using_gsi=True, server=self.n1ql_node)
     # load the data
     self.cluster.load_gen_docs(self.master, self.src_bucket_name, gen_load_non_json, self.buckets[0].kvs[1],
                                'create', compression=self.sdk_compression)
     body = self.create_save_function_body(self.function_name, HANDLER_CODE.N1QL_ITERATORS, execution_timeout=60)
     self.deploy_function(body)
     # Wait for eventing to catch up with all the update mutations and verify results
     self.verify_eventing_results(self.function_name, 100)
     # delete all the docs
     self.cluster.load_gen_docs(self.master, self.src_bucket_name, gen_load_non_json_del, self.buckets[0].kvs[1],
                                'delete', compression=self.sdk_compression)
     # Wait for eventing to catch up with all the delete mutations and verify results
     self.verify_eventing_results(self.function_name, 0, skip_stats_validation=True)
     # undeploy and delete the function
     self.undeploy_and_delete_function(body)
     # delete all the primary indexes
     self.n1ql_helper.drop_primary_index(using_gsi=True, server=self.n1ql_node)
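Across these examples the N1QLHelper usage follows one recurring pattern: look up the n1ql node, construct the helper, create a GSI primary index before deploying the handler, and drop the index during cleanup. Below is a minimal sketch of that pattern pulled out into two helpers; the couchbase_helper.tuq_helper import path and the test-object attributes (shell, buckets, master, and so on) are assumptions based on the testrunner suites these snippets come from.

 # A condensed sketch of the recurring setup/teardown pattern (assumed import path).
 from couchbase_helper.tuq_helper import N1QLHelper

 def setup_n1ql(test):
     # Locate a node running the query service and point the helper at it.
     test.n1ql_node = test.get_nodes_from_services_map(service_type="n1ql")
     test.n1ql_helper = N1QLHelper(shell=test.shell,
                                   max_verify=test.max_verify,
                                   buckets=test.buckets,
                                   item_flag=test.item_flag,
                                   n1ql_port=test.n1ql_port,
                                   full_docs_list=test.full_docs_list,
                                   log=test.log, input=test.input,
                                   master=test.master,
                                   use_rest=True)
     # A GSI primary index is needed because the handler code issues N1QL queries.
     test.n1ql_helper.create_primary_index(using_gsi=True, server=test.n1ql_node)

 def teardown_n1ql(test):
     # Mirror of the setup: drop the primary index once the test has finished.
     test.n1ql_helper.drop_primary_index(using_gsi=True, server=test.n1ql_node)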
Example #2
 def create_secondary_index_query(self, index_field, index_query=None, step='before'):
     '''
     1. Create an index
     2. Run queries that use the index
     '''
     
     try:
         self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
         if self.n1ql_node is not None:
             self.n1ql_helper = N1QLHelper(shell=self.shell,
                                               max_verify=self.max_verify,
                                               buckets=self.buckets,
                                               item_flag=self.item_flag,
                                               n1ql_port=self.n1ql_port,
                                               full_docs_list=self.full_docs_list,
                                               log=self.log, input=self.input,
                                               master=self.n1ql_node,
                                               use_rest=True
                                               )
             query = "Create index " + index_field + " on default(" + index_field + ")"
             self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_node)
             if step == 'before':
                 query = 'create primary index on default'
                 self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_node)
             self.perform_doc_ops_in_all_cb_buckets(2000, 'create', end_key=2000)
             self.sleep(10)
             query = "select * from default where " + index_field + " is not NULL"
             self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_node)
             if index_query is not None:
                 query = "select * from default where " + index_query + " is not NULL"
                 self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_node)
                 query = "select * from default"
                 self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_node)
     except Exception as e:
         raise Exception("Error while creating index/n1ql setup: %s" % e)
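For reference, the statements the method above issues can be listed explicitly; the field names below are hypothetical and only illustrate which query runs under which branch.

 # Illustrative only: statements issued for hypothetical arguments
 # index_field='age', index_query='name', step='before'.
 index_field, index_query = 'age', 'name'
 statements = [
     "Create index " + index_field + " on default(" + index_field + ")",
     "create primary index on default",                               # only when step == 'before'
     "select * from default where " + index_field + " is not NULL",
     "select * from default where " + index_query + " is not NULL",   # only when index_query is given
     "select * from default",                                         # only when index_query is given
 ]
 for statement in statements:
     print(statement)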
Example #3
 def setUp(self):
     super(EventingBucket, self).setUp()
     self.rest.set_service_memoryQuota(service='memoryQuota',
                                       memoryQuota=700)
     if self.create_functions_buckets:
         self.bucket_size = 100
         self.metadata_bucket_size = 400
         log.info(self.bucket_size)
         bucket_params = self._create_bucket_params(
             server=self.server,
             size=self.bucket_size,
             replicas=self.num_replicas)
         bucket_params_meta = self._create_bucket_params(
             server=self.server,
             size=self.metadata_bucket_size,
             replicas=self.num_replicas)
         self.cluster.create_standard_bucket(name=self.src_bucket_name,
                                             port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.src_bucket = RestConnection(self.master).get_buckets()
         self.cluster.create_standard_bucket(name=self.dst_bucket_name,
                                             port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.cluster.create_standard_bucket(
             name=self.metadata_bucket_name,
             port=STANDARD_BUCKET_PORT + 1,
             bucket_params=bucket_params_meta)
         self.buckets = RestConnection(self.master).get_buckets()
     self.gens_load = self.generate_docs(self.docs_per_day)
     self.expiry = 3
     handler_code = self.input.param('handler_code', 'bucket_op')
     if handler_code == 'bucket_op':
         self.handler_code = HANDLER_CODE.DELETE_BUCKET_OP_ON_DELETE
     elif handler_code == 'bucket_op_with_timers':
         self.handler_code = HANDLER_CODE.BUCKET_OPS_WITH_TIMERS
     elif handler_code == 'bucket_op_with_cron_timers':
         self.handler_code = HANDLER_CODE.BUCKET_OPS_WITH_CRON_TIMERS
     elif handler_code == 'n1ql_op_with_timers':
         # index is required for delete operation through n1ql
         self.n1ql_node = self.get_nodes_from_services_map(
             service_type="n1ql")
         self.n1ql_helper = N1QLHelper(shell=self.shell,
                                       max_verify=self.max_verify,
                                       buckets=self.buckets,
                                       item_flag=self.item_flag,
                                       n1ql_port=self.n1ql_port,
                                       full_docs_list=self.full_docs_list,
                                       log=self.log,
                                       input=self.input,
                                       master=self.master,
                                       use_rest=True)
         self.n1ql_helper.create_primary_index(using_gsi=True,
                                               server=self.n1ql_node)
         self.handler_code = HANDLER_CODE.N1QL_OPS_WITH_TIMERS
     elif handler_code == 'source_bucket_mutation':
         self.handler_code = HANDLER_CODE.BUCKET_OP_WITH_SOURCE_BUCKET_MUTATION
     elif handler_code == 'source_bucket_mutation_timers':
         self.handler_code = HANDLER_CODE.BUCKET_OP_SOURCE_BUCKET_MUTATION_WITH_TIMERS
     else:
         self.handler_code = HANDLER_CODE.DELETE_BUCKET_OP_ON_DELETE
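The handler_code selection above is a plain if/elif ladder; the same choice can be expressed as a dictionary lookup with a default. A sketch of that alternative is shown below (the constant names are the ones used in the example; note that the real n1ql_op_with_timers branch additionally builds an N1QLHelper and a primary index, which a lookup table does not capture):

 # Sketch: handler_code selection as a dict lookup instead of an if/elif ladder.
 def select_handler_code(handler_code, HANDLER_CODE):
     handler_map = {
         'bucket_op': HANDLER_CODE.DELETE_BUCKET_OP_ON_DELETE,
         'bucket_op_with_timers': HANDLER_CODE.BUCKET_OPS_WITH_TIMERS,
         'bucket_op_with_cron_timers': HANDLER_CODE.BUCKET_OPS_WITH_CRON_TIMERS,
         'n1ql_op_with_timers': HANDLER_CODE.N1QL_OPS_WITH_TIMERS,
         'source_bucket_mutation': HANDLER_CODE.BUCKET_OP_WITH_SOURCE_BUCKET_MUTATION,
         'source_bucket_mutation_timers': HANDLER_CODE.BUCKET_OP_SOURCE_BUCKET_MUTATION_WITH_TIMERS,
     }
     # Unknown values fall back to the same default the else branch uses.
     return handler_map.get(handler_code, HANDLER_CODE.DELETE_BUCKET_OP_ON_DELETE)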
Example #4
 def setUp(self):
     super(EventingRQG, self).setUp()
     if self.create_functions_buckets:
         self.bucket_size = 100
         log.info(self.bucket_size)
         bucket_params = self._create_bucket_params(server=self.server, size=self.bucket_size,
                                                    replicas=self.num_replicas)
         self.cluster.create_standard_bucket(name=self.src_bucket_name, port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.src_bucket = RestConnection(self.master).get_buckets()
         self.cluster.create_standard_bucket(name=self.dst_bucket_name, port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.cluster.create_standard_bucket(name=self.metadata_bucket_name, port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.buckets = RestConnection(self.master).get_buckets()
     self.gens_load = self.generate_docs(self.docs_per_day)
     self.expiry = 3
     self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
     self.n1ql_helper = N1QLHelper(shell=self.shell,
                                   max_verify=self.max_verify,
                                   buckets=self.buckets,
                                   item_flag=self.item_flag,
                                   n1ql_port=self.n1ql_port,
                                   full_docs_list=self.full_docs_list,
                                   log=self.log, input=self.input,
                                   master=self.master,
                                   use_rest=True
                                   )
     self.number_of_handler = self.input.param('number_of_handler', 5)
     self.number_of_queries = self.input.param('number_of_queries', None)
     self.template_file = self.input.param(
         'template_file',
         'b/resources/rqg/simple_table_db/query_tests_using_templates/query_10000_fields.txt.zip')
Example #5
 def setUp(self):
     super(EventingN1QL, self).setUp()
     if self.create_functions_buckets:
         self.bucket_size = 100
         log.info(self.bucket_size)
         bucket_params = self._create_bucket_params(
             server=self.server,
             size=self.bucket_size,
             replicas=self.num_replicas)
         self.cluster.create_standard_bucket(name=self.src_bucket_name,
                                             port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.src_bucket = RestConnection(self.master).get_buckets()
         self.cluster.create_standard_bucket(name=self.dst_bucket_name,
                                             port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.cluster.create_standard_bucket(name=self.metadata_bucket_name,
                                             port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.buckets = RestConnection(self.master).get_buckets()
     self.gens_load = self.generate_docs(self.docs_per_day)
     self.expiry = 3
     self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
     self.n1ql_helper = N1QLHelper(shell=self.shell,
                                   max_verify=self.max_verify,
                                   buckets=self.buckets,
                                   item_flag=self.item_flag,
                                   n1ql_port=self.n1ql_port,
                                   full_docs_list=self.full_docs_list,
                                   log=self.log,
                                   input=self.input,
                                   master=self.master,
                                   use_rest=True)
Example #6
 def setUp(self):
     super(EventingConcurrency, self).setUp()
     self.rest.set_service_memoryQuota(service='memoryQuota', memoryQuota=700)
     if self.create_functions_buckets:
         self.replicas = self.input.param("replicas", 0)
         self.bucket_size = 100
         # This is needed as we have increased the context size to 93KB. If this is not increased the metadata
         # bucket goes into heavy DGM
         self.metadata_bucket_size = 300
         log.info(self.bucket_size)
         bucket_params = self._create_bucket_params(server=self.server, size=self.bucket_size,
                                                    replicas=self.replicas)
         bucket_params_meta = self._create_bucket_params(server=self.server, size=self.metadata_bucket_size,
                                                         replicas=self.replicas)
         self.cluster.create_standard_bucket(name=self.src_bucket_name, port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.src_bucket = RestConnection(self.master).get_buckets()
         self.cluster.create_standard_bucket(name=self.dst_bucket_name, port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.cluster.create_standard_bucket(name=self.dst_bucket_name1, port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.cluster.create_standard_bucket(name=self.metadata_bucket_name, port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params_meta)
         self.buckets = RestConnection(self.master).get_buckets()
     self.gens_load = self.generate_docs(self.docs_per_day)
     self.expiry = 3
     self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
     self.n1ql_helper = N1QLHelper(shell=self.shell, max_verify=self.max_verify, buckets=self.buckets,
                                   item_flag=self.item_flag, n1ql_port=self.n1ql_port,
                                   full_docs_list=self.full_docs_list, log=self.log, input=self.input,
                                   master=self.master, use_rest=True)
     self.n1ql_helper.create_primary_index(using_gsi=True, server=self.n1ql_node)
Example #7
 def test_curl_with_different_handlers_pause_resume_n1ql(self):
     self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
     self.n1ql_helper = N1QLHelper(shell=self.shell, max_verify=self.max_verify, buckets=self.buckets,
                                   item_flag=self.item_flag, n1ql_port=self.n1ql_port,
                                   full_docs_list=self.full_docs_list, log=self.log, input=self.input,
                                   master=self.master, use_rest=True)
     self.n1ql_helper.create_primary_index(using_gsi=True, server=self.n1ql_node)
     gen_load_del = copy.deepcopy(self.gens_load)
     body = self.create_save_function_body(self.function_name, self.handler_code,
                                           worker_count=3)
     self.deploy_function(body)
     # load some data
     task = self.cluster.async_load_gen_docs(self.master, self.src_bucket_name, self.gens_load,
                                             self.buckets[0].kvs[1], 'create', compression=self.sdk_compression)
     self.pause_function(body)
     task.result()
     self.resume_function(body)
     # Wait for eventing to catch up with all the create mutations and verify results
     self.verify_eventing_results(self.function_name, self.docs_per_day * 2016, skip_stats_validation=True)
     # delete json documents
     self.load(gen_load_del, buckets=self.src_bucket, flag=self.item_flag, verify_data=False,
               batch_size=self.batch_size, op_type='delete')
     self.pause_resume_n(body, 1)
     self.verify_eventing_results(self.function_name, 0, skip_stats_validation=True)
     self.undeploy_and_delete_function(body)
Example #8
 def setUp(self):
     super(EventingVolume, self).setUp()
     # Un-deploy and delete all the functions
     self.undeploy_delete_all_functions()
     self.dst_bucket_name2 = self.input.param('dst_bucket_name2',
                                              'dst_bucket2')
     self.worker_count = self.input.param('worker_count', 3)
     self.cpp_worker_thread_count = self.input.param(
         'cpp_worker_thread_count', 3)
     # self.rest.set_service_memoryQuota(service='memoryQuota', memoryQuota=3000)
     self.rest.set_service_memoryQuota(service='memoryQuota',
                                       memoryQuota=700)
     if self.create_functions_buckets:
         # self.bucket_size = 500
         # self.meta_bucket_size = 100
         self.bucket_size = 100
         self.meta_bucket_size = 100
         bucket_params = self._create_bucket_params(
             server=self.server,
             size=self.bucket_size,
             replicas=self.num_replicas)
         bucket_params_meta = self._create_bucket_params(
             server=self.server,
             size=self.meta_bucket_size,
             replicas=self.num_replicas)
         self.cluster.create_standard_bucket(name=self.src_bucket_name,
                                             port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.src_bucket = RestConnection(self.master).get_buckets()
         self.cluster.create_standard_bucket(name=self.dst_bucket_name,
                                             port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.cluster.create_standard_bucket(name=self.dst_bucket_name1,
                                             port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.cluster.create_standard_bucket(name=self.dst_bucket_name2,
                                             port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.cluster.create_standard_bucket(
             name=self.metadata_bucket_name,
             port=STANDARD_BUCKET_PORT + 1,
             bucket_params=bucket_params_meta)
         self.buckets = RestConnection(self.master).get_buckets()
     self.gens_load = self.generate_docs(self.docs_per_day)
     self.batch_size = 20
     # index is required for delete operation through n1ql
     self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
     self.n1ql_helper = N1QLHelper(shell=self.shell,
                                   max_verify=self.max_verify,
                                   buckets=self.buckets,
                                   item_flag=self.item_flag,
                                   n1ql_port=self.n1ql_port,
                                   full_docs_list=self.full_docs_list,
                                   log=self.log,
                                   input=self.input,
                                   master=self.master,
                                   use_rest=True)
     self.n1ql_helper.create_primary_index(using_gsi=True,
                                           server=self.n1ql_node)
Example #9
 def setUp(self):
     super(EventingDataset, self).setUp()
     self.rest.set_service_memoryQuota(service='memoryQuota',
                                       memoryQuota=700)
     if self.create_functions_buckets:
         self.replicas = self.input.param("replicas", 0)
         self.bucket_size = 100
         # This is needed as we have increased the context size to 93KB. If this is not increased the metadata
         # bucket goes into heavy DGM
         self.metadata_bucket_size = 400
         log.info(self.bucket_size)
         bucket_params = self._create_bucket_params(server=self.server,
                                                    size=self.bucket_size,
                                                    replicas=self.replicas)
         bucket_params_meta = self._create_bucket_params(
             server=self.server,
             size=self.metadata_bucket_size,
             replicas=self.replicas)
         self.cluster.create_standard_bucket(name=self.src_bucket_name,
                                             port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.src_bucket = RestConnection(self.master).get_buckets()
         self.cluster.create_standard_bucket(name=self.dst_bucket_name,
                                             port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.cluster.create_standard_bucket(
             name=self.metadata_bucket_name,
             port=STANDARD_BUCKET_PORT + 1,
             bucket_params=bucket_params_meta)
         self.buckets = RestConnection(self.master).get_buckets()
     self.gens_load = self.generate_docs(self.docs_per_day)
     self.expiry = 3
     self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
     self.n1ql_helper = N1QLHelper(shell=self.shell,
                                   max_verify=self.max_verify,
                                   buckets=self.buckets,
                                   item_flag=self.item_flag,
                                   n1ql_port=self.n1ql_port,
                                   full_docs_list=self.full_docs_list,
                                   log=self.log,
                                   input=self.input,
                                   master=self.master,
                                   use_rest=True)
     handler_code = self.input.param('handler_code', 'bucket_op')
     if handler_code == 'bucket_op':
         self.handler_code = HANDLER_CODE.DELETE_BUCKET_OP_ON_DELETE1
     elif handler_code == 'bucket_op_with_timers':
         self.handler_code = HANDLER_CODE.BUCKET_OPS_WITH_TIMERS
     elif handler_code == 'bucket_op_with_cron_timers':
         self.handler_code = HANDLER_CODE.BUCKET_OPS_WITH_CRON_TIMERS
     elif handler_code == 'n1ql_op_with_timers':
         self.n1ql_helper.create_primary_index(using_gsi=True,
                                               server=self.n1ql_node)
         self.handler_code = HANDLER_CODE.N1QL_OPS_WITH_TIMERS
     else:
         self.handler_code = HANDLER_CODE.DELETE_BUCKET_OP_ON_DELETE1
Example #10
 def setUp(self):
     super(EventingBucketCache, self).setUp()
     self.rest.set_service_memoryQuota(service='memoryQuota',
                                       memoryQuota=1400)
     if self.create_functions_buckets:
         self.bucket_size = 250
         log.info(self.bucket_size)
         bucket_params = self._create_bucket_params(
             server=self.server,
             size=self.bucket_size,
             replicas=self.num_replicas)
         self.cluster.create_standard_bucket(name=self.src_bucket_name,
                                             port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.src_bucket = RestConnection(self.master).get_buckets()
         self.cluster.create_standard_bucket(name=self.dst_bucket_name,
                                             port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.cluster.create_standard_bucket(name=self.metadata_bucket_name,
                                             port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.cluster.create_standard_bucket(name=self.dst_bucket_name1,
                                             port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.buckets = RestConnection(self.master).get_buckets()
     self.gens_load = self.generate_docs(self.docs_per_day)
     self.expiry = 3
     handler_code = self.input.param('handler_code', 'bucket_op')
     # index is required for delete operation through n1ql
     self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
     self.n1ql_helper = N1QLHelper(shell=self.shell,
                                   max_verify=self.max_verify,
                                   buckets=self.buckets,
                                   item_flag=self.item_flag,
                                   n1ql_port=self.n1ql_port,
                                   full_docs_list=self.full_docs_list,
                                   log=self.log,
                                   input=self.input,
                                   master=self.master,
                                   use_rest=True)
     self.n1ql_helper.create_primary_index(using_gsi=True,
                                           server=self.n1ql_node)
     if self.non_default_collection:
         self.create_scope_collection(bucket=self.src_bucket_name,
                                      scope=self.src_bucket_name,
                                      collection=self.src_bucket_name)
         self.create_scope_collection(bucket=self.metadata_bucket_name,
                                      scope=self.metadata_bucket_name,
                                      collection=self.metadata_bucket_name)
         self.create_scope_collection(bucket=self.dst_bucket_name,
                                      scope=self.dst_bucket_name,
                                      collection=self.dst_bucket_name)
         self.create_scope_collection(bucket=self.dst_bucket_name1,
                                      scope=self.dst_bucket_name1,
                                      collection=self.dst_bucket_name1)
Example #11
 def test_bucket_overhead(self):
     body = self.create_save_function_body(
         self.function_name,
         HANDLER_CODE.BUCKET_OPS_WITH_CRON_TIMER,
         worker_count=3)
     # create an alias for the source bucket as well so that the handler can read data from it
     body['depcfg']['buckets'].append({
         "alias": self.src_bucket_name,
         "bucket_name": self.src_bucket_name
     })
     self.rest.create_function(body['appname'], body, self.function_scope)
     self.deploy_function(body)
     # sleep intentionally added as we are validating no mutations are processed by eventing
     self.sleep(60)
     countMap = self.get_buckets_itemCount()
     initialDoc = countMap["metadata"]
     # load some data
     self.load(self.gens_load,
               buckets=self.src_bucket,
               flag=self.item_flag,
               verify_data=False,
               batch_size=self.batch_size)
     # Wait for eventing to catch up with all the create mutations and verify results
     self.verify_eventing_results(self.function_name,
                                  2016 * self.docs_per_day,
                                  skip_stats_validation=True)
     countMap = self.get_buckets_itemCount()
     finalDoc = countMap["metadata"]
     if initialDoc != finalDoc:
         n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
         n1ql_helper = N1QLHelper(shell=self.shell,
                                  max_verify=self.max_verify,
                                  buckets=self.buckets,
                                  item_flag=self.item_flag,
                                  n1ql_port=self.n1ql_port,
                                  full_docs_list=self.full_docs_list,
                                  log=self.log,
                                  input=self.input,
                                  master=self.master,
                                  use_rest=True)
         n1ql_helper.create_primary_index(using_gsi=True, server=n1ql_node)
         query1 = "select meta().id from metadata where meta().id not like 'eventing::%::vb::%'" \
                  " and meta().id not like 'eventing::%:rt:%' and meta().id not like 'eventing::%:sp'"
         result1 = n1ql_helper.run_cbq_query(query=query1, server=n1ql_node)
         print(result1)
         query2 = "select meta().id from metadata where meta().id like 'eventing::%:sp' and sta != stp"
         result2 = n1ql_helper.run_cbq_query(query=query2, server=n1ql_node)
         print(result2)
         query3 = "select meta().id from meta where meta().id like 'eventing::%:rt:%'"
         result3 = n1ql_helper.run_cbq_query(query=query3, server=n1ql_node)
         print(result3)
         self.fail(
             "initial doc count in metadata {} is not equal to final doc count in metadata {}"
             .format(initialDoc, finalDoc))
     self.undeploy_and_delete_function(body)
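When the item counts differ, the test above runs three ad-hoc diagnostic queries against the metadata bucket (documents that are not vb/timer/checkpoint bookkeeping, ':sp' documents where sta != stp, and leftover ':rt:' timer documents). A small sketch that bundles those same queries into one helper, assuming the n1ql_helper and n1ql_node built in the failure branch:

 # Sketch: the three diagnostic queries from the failure branch, bundled together.
 def dump_metadata_leftovers(n1ql_helper, n1ql_node):
     queries = [
         # documents that are not vb, timer, or checkpoint bookkeeping
         "select meta().id from metadata where meta().id not like 'eventing::%::vb::%'"
         " and meta().id not like 'eventing::%:rt:%' and meta().id not like 'eventing::%:sp'",
         # ':sp' documents where sta != stp
         "select meta().id from metadata where meta().id like 'eventing::%:sp' and sta != stp",
         # leftover ':rt:' timer documents
         "select meta().id from metadata where meta().id like 'eventing::%:rt:%'",
     ]
     for query in queries:
         print(n1ql_helper.run_cbq_query(query=query, server=n1ql_node))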
Example #12
 def setUp(self):
     super(FTSReclaimableDiskSpace, self).setUp()

     self.default_group_name = "Group 1"
     self.n1ql = N1QLHelper(version="sherlock",
                            shell=None,
                            item_flag=None,
                            n1ql_port=8903,
                            full_docs_list=[],
                            log=self.log)
     self.rest = RestConnection(self._cb_cluster.get_master_node())
     self._cleanup_server_groups()
Example #13
 def setUp(self):
     super(EventingVolume, self).setUp()
     self.rest.set_service_memoryQuota(service='memoryQuota',
                                       memoryQuota=2800)
     if self.create_functions_buckets:
         self.bucket_size = 1000
         self.metadata_bucket_size = 300
         self.replicas = 0
         bucket_params = self._create_bucket_params(server=self.server,
                                                    size=1500,
                                                    replicas=self.replicas)
         bucket_params_meta = self._create_bucket_params(
             server=self.server,
             size=self.metadata_bucket_size,
             replicas=self.replicas)
         self.cluster.create_standard_bucket(name=self.src_bucket_name,
                                             port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.src_bucket = RestConnection(self.master).get_buckets()
         bucket_params = self._create_bucket_params(server=self.server,
                                                    size=1000,
                                                    replicas=self.replicas)
         self.cluster.create_standard_bucket(name=self.dst_bucket_name,
                                             port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.cluster.create_standard_bucket(
             name=self.metadata_bucket_name,
             port=STANDARD_BUCKET_PORT + 1,
             bucket_params=bucket_params_meta)
         self.buckets = RestConnection(self.master).get_buckets()
         self.hostname = "http://qa.sc.couchbase.com/"
         self.create_n_scope(self.dst_bucket_name, 5)
         self.create_n_scope(self.src_bucket_name, 5)
         self.create_n_collections(self.dst_bucket_name, "scope_1", 5)
         self.create_n_collections(self.src_bucket_name, "scope_1", 5)
         self.handler_code = "handler_code/ABO/insert_rebalance.js"
     # index is required for delete operation through n1ql
     self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
     self.n1ql_helper = N1QLHelper(shell=self.shell,
                                   max_verify=self.max_verify,
                                   buckets=self.buckets,
                                   item_flag=self.item_flag,
                                   n1ql_port=self.n1ql_port,
                                   full_docs_list=self.full_docs_list,
                                   log=self.log,
                                   input=self.input,
                                   master=self.master,
                                   use_rest=True)
     self.n1ql_helper.create_primary_index(using_gsi=True,
                                           server=self.n1ql_node)
     self.batch_size = 10**4
Example #14
 def setUp(self):

     self.input = TestInputSingleton.input
     self.input.test_params.update({"standard_buckets": 0})

     super(QueryParameterTest, self).setUp()
     self.query_file = self.input.param(
         "query_file", "b/resources/analytics_query_with_parameter.txt")
     self.n1ql_server = self.get_nodes_from_services_map(
         service_type="n1ql")
     self.curl_path = "curl"
     shell = RemoteMachineShellConnection(self.master)
     type = shell.extract_remote_info().distribution_type
     if type.lower() == 'windows':
         self.path = testconstants.WIN_COUCHBASE_BIN_PATH
         self.curl_path = "%scurl" % self.path
         self.n1ql_certs_path = "/cygdrive/c/Program\ Files/Couchbase/server/var/lib/couchbase/n1qlcerts"
     self.load_sample_buckets(servers=[self.master],
                              bucketName="travel-sample",
                              total_items=self.travel_sample_docs_count)
     self.cbas_util.createConn("travel-sample")

     # Create dataset on the CBAS bucket
     self.cbas_util.create_dataset_on_bucket(
         cbas_bucket_name=self.cb_bucket_name,
         cbas_dataset_name=self.cbas_dataset_name)

     # Connect to Bucket
     self.cbas_util.connect_to_bucket(
         cbas_bucket_name=self.cbas_bucket_name,
         cb_bucket_password=self.cb_bucket_password)

     # Allow ingestion to complete
     self.cbas_util.wait_for_ingestion_complete(
         [self.cbas_dataset_name], self.travel_sample_docs_count, 300)

     self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
     self.shell = RemoteMachineShellConnection(self.master)
     self.item_flag = self.input.param("item_flag", 0)
     self.n1ql_port = self.input.param("n1ql_port", 8093)
     self.n1ql_helper = N1QLHelper(shell=self.shell,
                                   max_verify=self.max_verify,
                                   buckets=self.buckets,
                                   item_flag=self.item_flag,
                                   n1ql_port=self.n1ql_port,
                                   log=self.log,
                                   input=self.input,
                                   master=self.master,
                                   use_rest=True)
Example #15
 def setUp(self):
     super(EventingRBACSupport, self).setUp()
     self.rest.set_service_memoryQuota(service='memoryQuota',
                                       memoryQuota=700)
     if self.create_functions_buckets:
         self.bucket_size = 100
         log.info(self.bucket_size)
         bucket_params = self._create_bucket_params(
             server=self.server,
             size=self.bucket_size,
             replicas=self.num_replicas)
         self.cluster.create_standard_bucket(name=self.src_bucket_name,
                                             port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.src_bucket = RestConnection(self.master).get_buckets()
         self.cluster.create_standard_bucket(name=self.dst_bucket_name,
                                             port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.cluster.create_standard_bucket(name=self.metadata_bucket_name,
                                             port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.buckets = RestConnection(self.master).get_buckets()
     self.gens_load = self.generate_docs(self.docs_per_day)
     self.expiry = 3
     handler_code = self.input.param('handler_code', 'bucket_op')
     # index is required for delete operation through n1ql
     self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
     self.n1ql_helper = N1QLHelper(shell=self.shell,
                                   max_verify=self.max_verify,
                                   buckets=self.buckets,
                                   item_flag=self.item_flag,
                                   n1ql_port=self.n1ql_port,
                                   full_docs_list=self.full_docs_list,
                                   log=self.log,
                                   input=self.input,
                                   master=self.master,
                                   use_rest=True)
     self.n1ql_helper.create_primary_index(using_gsi=True,
                                           server=self.n1ql_node)
     self.users = self.input.param('users', None)
     if self.users:
         list_of_users = eval(eval(self.users))
         for user in list_of_users:
             u = [{
                 'id': user['id'],
                 'password': user['password'],
                 'name': user['name']
             }]
             RbacBase().create_user_source(u, 'builtin', self.master)
             user_role_list = [{
                 'id': user['id'],
                 'name': user['name'],
                 'roles': user['roles']
             }]
             RbacBase().add_user_role(user_role_list, self.rest, 'builtin')
         status, content, header = rbacmain(
             self.master)._retrieve_user_roles()
         self.log.info(json.loads(content))
     handler_code = self.input.param('handler_code', 'bucket_op')
     if handler_code == 'bucket_op':
         self.handler_code = "handler_code/ABO/insert_rebalance.js"
     elif handler_code == 'bucket_op_with_timers':
         self.handler_code = HANDLER_CODE.BUCKET_OPS_WITH_TIMERS
     elif handler_code == 'bucket_op_with_cron_timers':
         self.handler_code = "handler_code/ABO/insert_timer.js"
     elif handler_code == 'n1ql_op_with_timers':
         self.handler_code = HANDLER_CODE.N1QL_OPS_WITH_TIMERS
     elif handler_code == 'n1ql_op_without_timers':
         self.handler_code = HANDLER_CODE.N1QL_OPS_WITHOUT_TIMERS
     elif handler_code == 'source_bucket_mutation':
         self.handler_code = "handler_code/ABO/insert_sbm.js"
     elif handler_code == 'source_bucket_mutation_delete':
         self.handler_code = HANDLER_CODE.BUCKET_OP_SOURCE_BUCKET_MUTATION_DELETE
     elif handler_code == 'bucket_op_curl_get':
         self.handler_code = HANDLER_CODE_CURL.BUCKET_OP_WITH_CURL_GET
     elif handler_code == 'bucket_op_curl_post':
         self.handler_code = HANDLER_CODE_CURL.BUCKET_OP_WITH_CURL_POST
     elif handler_code == 'bucket_op_curl_put':
         self.handler_code = HANDLER_CODE_CURL.BUCKET_OP_WITH_CURL_PUT
     elif handler_code == 'bucket_op_curl_delete':
         self.handler_code = HANDLER_CODE_CURL.BUCKET_OP_WITH_CURL_DELETE
     elif handler_code == 'cancel_timer':
         self.handler_code = HANDLER_CODE.CANCEL_TIMER_REBALANCE
     elif handler_code == 'bucket_op_expired':
         self.handler_code = HANDLER_CODE.BUCKET_OP_EXPIRED
     elif handler_code == 'advance_bucket_op_auth_failure':
         self.handler_code = "handler_code/ABO/advance_bucket_op_auth_failure.js"
     elif handler_code == 'n1ql_op_auth_failure':
         self.handler_code = "handler_code/n1ql_op_auth_failure.js"
Example #16
 def setUp(self):
     super(EventingFailover, self).setUp()
     self.rest.set_service_memoryQuota(service='memoryQuota',
                                       memoryQuota=700)
     if self.create_functions_buckets:
         self.replicas = self.input.param("replicas", 0)
         self.bucket_size = 100
         # This is needed as we have increased the context size to 93KB. If this is not increased the metadata
         # bucket goes into heavy DGM
         self.metadata_bucket_size = 200
         log.info(self.bucket_size)
         bucket_params = self._create_bucket_params(server=self.server,
                                                    size=self.bucket_size,
                                                    replicas=self.replicas)
         bucket_params_meta = self._create_bucket_params(
             server=self.server,
             size=self.metadata_bucket_size,
             replicas=self.replicas)
         self.cluster.create_standard_bucket(name=self.src_bucket_name,
                                             port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.src_bucket = RestConnection(self.master).get_buckets()
         self.cluster.create_standard_bucket(name=self.dst_bucket_name,
                                             port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.cluster.create_standard_bucket(
             name=self.metadata_bucket_name,
             port=STANDARD_BUCKET_PORT + 1,
             bucket_params=bucket_params_meta)
         self.buckets = RestConnection(self.master).get_buckets()
     self.gens_load = self.generate_docs(self.docs_per_day)
     self.expiry = 3
     handler_code = self.input.param('handler_code', 'bucket_op')
     if handler_code == 'bucket_op':
         self.handler_code = HANDLER_CODE.BUCKET_OP_WITH_RAND
     elif handler_code == 'bucket_op_with_timers':
         self.handler_code = HANDLER_CODE.BUCKET_OPS_WITH_TIMERS
     elif handler_code == 'n1ql_op_with_timers':
         self.handler_code = HANDLER_CODE.N1QL_OPS_WITH_TIMERS
     elif handler_code == 'n1ql_op_without_timers':
         self.handler_code = HANDLER_CODE.N1QL_OPS_WITHOUT_TIMERS
     elif handler_code == 'source_bucket_mutation':
         self.handler_code = HANDLER_CODE.BUCKET_OP_WITH_SOURCE_BUCKET_MUTATION
     elif handler_code == 'bucket_op_curl_jenkins':
         self.handler_code = HANDLER_CODE_CURL.BUCKET_OP_WITH_CURL_JENKINS
     else:
         self.handler_code = HANDLER_CODE.DELETE_BUCKET_OP_ON_DELETE
     force_disable_new_orchestration = self.input.param(
         'force_disable_new_orchestration', False)
     if force_disable_new_orchestration:
         self.rest.diag_eval(
             "ns_config:set(force_disable_new_orchestration, true).")
     if self.non_default_collection:
         self.create_scope_collection(bucket=self.src_bucket_name,
                                      scope=self.src_bucket_name,
                                      collection=self.src_bucket_name)
         self.create_scope_collection(bucket=self.metadata_bucket_name,
                                      scope=self.metadata_bucket_name,
                                      collection=self.metadata_bucket_name)
         self.create_scope_collection(bucket=self.dst_bucket_name,
                                      scope=self.dst_bucket_name,
                                      collection=self.dst_bucket_name)
     # index is required for delete operation through n1ql
     self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
     self.n1ql_helper = N1QLHelper(shell=self.shell,
                                   max_verify=self.max_verify,
                                   buckets=self.buckets,
                                   item_flag=self.item_flag,
                                   n1ql_port=self.n1ql_port,
                                   full_docs_list=self.full_docs_list,
                                   log=self.log,
                                   input=self.input,
                                   master=self.master,
                                   use_rest=True)
     self.n1ql_helper.create_primary_index(using_gsi=True,
                                           server=self.n1ql_node)
Example #17
 def test_eventing_with_ephemeral_buckets_with_lww_enabled(self):
     # delete the existing couchbase buckets which were created as part of setUp
     for bucket in self.buckets:
         # Having metadata bucket as an ephemeral bucket is a bad idea
         if bucket.name != "metadata":
             self.rest.delete_bucket(bucket.name)
     # create ephemeral buckets with the same name
     bucket_params = self._create_bucket_params(
         server=self.server,
         size=self.bucket_size,
         replicas=self.num_replicas,
         bucket_type='ephemeral',
         eviction_policy='noEviction',
         lww=True)
     tasks = []
     for bucket in self.buckets:
         # Having metadata bucket as an ephemeral bucket is a bad idea
         if bucket.name != "metadata":
             tasks.append(
                 self.cluster.async_create_standard_bucket(
                     name=bucket.name,
                     port=STANDARD_BUCKET_PORT + 1,
                     bucket_params=bucket_params))
     for task in tasks:
         task.result()
     n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
     n1ql_helper = N1QLHelper(shell=self.shell,
                              max_verify=self.max_verify,
                              buckets=self.buckets,
                              item_flag=self.item_flag,
                              n1ql_port=self.n1ql_port,
                              full_docs_list=self.full_docs_list,
                              log=self.log,
                              input=self.input,
                              master=self.master,
                              use_rest=True)
     for bucket in self.buckets:
         if bucket.name != "metadata":
             query = "CREATE PRIMARY INDEX ON %s " % bucket.name
             n1ql_helper.run_cbq_query(query=query, server=n1ql_node)
     try:
         # load data
         self.load(self.gens_load,
                   buckets=self.src_bucket,
                   flag=self.item_flag,
                   verify_data=False,
                   batch_size=self.batch_size)
     except:
         pass
     body = self.create_save_function_body(self.function_name,
                                           self.handler_code)
     self.deploy_function(body)
     stats_src = RestConnection(
         self.master).get_bucket_stats(bucket=self.src_bucket_name)
     # Wait for eventing to catch up with all the update mutations and verify results
     self.verify_eventing_results(self.function_name,
                                  stats_src["curr_items"],
                                  skip_stats_validation=True)
     try:
         # delete all documents
         self.load(self.gens_load,
                   buckets=self.src_bucket,
                   flag=self.item_flag,
                   verify_data=False,
                   batch_size=self.batch_size,
                   op_type='delete')
     except:
         pass
     # Wait for eventing to catch up with all the delete mutations and verify results
     self.verify_eventing_results(self.function_name,
                                  0,
                                  skip_stats_validation=True)
     self.undeploy_and_delete_function(body)
     # sleep intentionally added, as eventing-consumers take some time to shut down
     self.sleep(30)
     self.assertTrue(
         self.check_if_eventing_consumers_are_cleaned_up(),
         msg=
         "eventing-consumer processes are not cleaned up even after undeploying the function"
     )
Example #18
 def setUp(self):
     super(EventingTools, self).setUp()
     self.rest.set_service_memoryQuota(service='memoryQuota',
                                       memoryQuota=500)
     if self.create_functions_buckets:
         self.bucket_size = 100
         log.info(self.bucket_size)
         bucket_params = self._create_bucket_params(
             server=self.server,
             size=self.bucket_size,
             replicas=self.num_replicas)
         self.cluster.create_standard_bucket(name=self.src_bucket_name,
                                             port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.src_bucket = RestConnection(self.master).get_buckets()
         self.cluster.create_standard_bucket(name=self.dst_bucket_name,
                                             port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.cluster.create_standard_bucket(name=self.metadata_bucket_name,
                                             port=STANDARD_BUCKET_PORT + 1,
                                             bucket_params=bucket_params)
         self.buckets = RestConnection(self.master).get_buckets()
     self.gens_load = self.generate_docs(self.docs_per_day)
     self.expiry = 3
     handler_code = self.input.param('handler_code', 'bucket_op')
     if handler_code == 'bucket_op':
         self.handler_code = HANDLER_CODE.DELETE_BUCKET_OP_ON_DELETE
     elif handler_code == 'bucket_op_with_timers':
         self.handler_code = HANDLER_CODE.BUCKET_OPS_WITH_TIMERS
     elif handler_code == 'bucket_op_with_cron_timers':
         self.handler_code = HANDLER_CODE.BUCKET_OPS_WITH_CRON_TIMERS
     elif handler_code == 'n1ql_op_with_timers':
         # index is required for delete operation through n1ql
         self.n1ql_node = self.get_nodes_from_services_map(
             service_type="n1ql")
         self.n1ql_helper = N1QLHelper(shell=self.shell,
                                       max_verify=self.max_verify,
                                       buckets=self.buckets,
                                       item_flag=self.item_flag,
                                       n1ql_port=self.n1ql_port,
                                       full_docs_list=self.full_docs_list,
                                       log=self.log,
                                       input=self.input,
                                       master=self.master,
                                       use_rest=True)
         self.n1ql_helper.create_primary_index(using_gsi=True,
                                               server=self.n1ql_node)
         self.handler_code = HANDLER_CODE.N1QL_OPS_WITH_TIMERS
     else:
         self.handler_code = HANDLER_CODE.DELETE_BUCKET_OP_ON_DELETE
     self.backupset = Backupset()
     self.backupset.directory = self.input.param("dir", "/tmp/entbackup")
     self.backupset.name = self.input.param("name", "backup")
     self.backupset.backup_host = self.servers[0]
     self.backupset.cluster_host = self.servers[0]
     self.backupset.cluster_host_username = self.servers[0].rest_username
     self.backupset.cluster_host_password = self.servers[0].rest_password
     self.backupset.restore_cluster_host = self.servers[1]
     self.backupset.restore_cluster_host_username = self.servers[
         1].rest_username
     self.backupset.restore_cluster_host_password = self.servers[
         1].rest_password
     self.num_shards = self.input.param("num_shards", None)
     self.debug_logs = self.input.param("debug-logs", False)
     cmd = 'curl -g %s:8091/diag/eval -u Administrator:password ' % self.master.ip
     cmd += '-d "path_config:component_path(bin)."'
     bin_path = subprocess.check_output(cmd, shell=True)
     try:
         bin_path = bin_path.decode()
     except AttributeError:
         pass
     if "bin" not in bin_path:
         self.fail("Check if cb server install on %s" % self.master.ip)
     else:
         self.cli_command_location = bin_path.replace('"', '') + "/"
     shell = RemoteMachineShellConnection(self.servers[0])
     info = shell.extract_remote_info().type.lower()
     self.root_path = LINUX_ROOT_PATH
     self.wget = "wget"
     self.os_name = "linux"
     self.tmp_path = "/tmp/"
     self.long_help_flag = "--help"
     self.short_help_flag = "-h"
     if info == 'linux':
         if self.nonroot:
             base_path = "/home/%s" % self.master.ssh_username
             self.database_path = "%s%s" % (base_path, COUCHBASE_DATA_PATH)
             self.root_path = "/home/%s/" % self.master.ssh_username
     elif info == 'windows':
         self.os_name = "windows"
         self.cmd_ext = ".exe"
         self.wget = "/cygdrive/c/automation/wget.exe"
         self.database_path = WIN_COUCHBASE_DATA_PATH_RAW
         self.root_path = WIN_ROOT_PATH
         self.tmp_path = WIN_TMP_PATH
         self.long_help_flag = "help"
         self.short_help_flag = "h"
         win_format = "C:/Program Files"
         cygwin_format = "/cygdrive/c/Program\ Files"
         if win_format in self.cli_command_location:
             self.cli_command_location = self.cli_command_location.replace(
                 win_format, cygwin_format)
         self.backupset.directory = self.input.param(
             "dir", WIN_TMP_PATH_RAW + "entbackup")
     elif info == 'mac':
         self.backupset.directory = self.input.param(
             "dir", "/tmp/entbackup")
     else:
         raise Exception("OS not supported.")
     self.backup_validation_files_location = "/tmp/backuprestore" + self.master.ip
     self.backups = []
     self.validation_helper = BackupRestoreValidations(
         self.backupset, self.cluster_to_backup, self.cluster_to_restore,
         self.buckets, self.backup_validation_files_location, self.backups,
         self.num_items, self.vbuckets)
     self.restore_only = self.input.param("restore-only", False)
     self.same_cluster = self.input.param("same-cluster", False)
     self.reset_restore_cluster = self.input.param("reset-restore-cluster",
                                                   True)
     self.no_progress_bar = self.input.param("no-progress-bar", True)
     self.multi_threads = self.input.param("multi_threads", False)
     self.threads_count = self.input.param("threads_count", 1)
     self.bucket_delete = self.input.param("bucket_delete", False)
     self.bucket_flush = self.input.param("bucket_flush", False)
     include_buckets = self.input.param("include-buckets", "")
     include_buckets = include_buckets.split(",") if include_buckets else []
     exclude_buckets = self.input.param("exclude-buckets", "")
     exclude_buckets = exclude_buckets.split(",") if exclude_buckets else []
     self.backupset.exclude_buckets = exclude_buckets
     self.backupset.include_buckets = include_buckets
     self.backupset.disable_bucket_config = self.input.param(
         "disable-bucket-config", False)
     self.backupset.disable_views = self.input.param("disable-views", False)
     self.backupset.disable_gsi_indexes = self.input.param(
         "disable-gsi-indexes", False)
     self.backupset.disable_ft_indexes = self.input.param(
         "disable-ft-indexes", False)
     self.backupset.disable_data = self.input.param("disable-data", False)
     self.backupset.disable_conf_res_restriction = self.input.param(
         "disable-conf-res-restriction", None)
     self.backupset.force_updates = self.input.param("force-updates", True)
     self.backupset.resume = self.input.param("resume", False)
     self.backupset.purge = self.input.param("purge", False)
     self.backupset.threads = self.input.param("threads",
                                               self.number_of_processors())
     self.backupset.start = self.input.param("start", 1)
     self.backupset.end = self.input.param("stop", 1)
     self.backupset.number_of_backups = self.input.param(
         "number_of_backups", 1)
     self.backupset.number_of_backups_after_upgrade = \
         self.input.param("number_of_backups_after_upgrade", 0)
     self.backupset.filter_keys = self.input.param("filter-keys", "")
     self.backupset.random_keys = self.input.param("random_keys", False)
     self.backupset.filter_values = self.input.param("filter-values", "")
     self.backupset.no_ssl_verify = self.input.param("no-ssl-verify", False)
     self.backupset.secure_conn = self.input.param("secure-conn", False)
     self.backupset.bk_no_cert = self.input.param("bk-no-cert", False)
     self.backupset.rt_no_cert = self.input.param("rt-no-cert", False)
     self.backupset.backup_list_name = self.input.param("list-names", None)
     self.backupset.backup_incr_backup = self.input.param(
         "incr-backup", None)
     self.backupset.bucket_backup = self.input.param("bucket-backup", None)
     self.backupset.backup_to_compact = self.input.param(
         "backup-to-compact", 0)
     self.backupset.map_buckets = self.input.param("map-buckets", None)
     self.add_node_services = self.input.param("add-node-services", "kv")
     self.backupset.backup_compressed = \
         self.input.param("backup-conpressed", False)
     self.number_of_backups_taken = 0
     self.vbucket_seqno = []
     self.expires = self.input.param("expires", 0)
     self.auto_failover = self.input.param("enable-autofailover", False)
     self.auto_failover_timeout = self.input.param("autofailover-timeout",
                                                   30)
     self.graceful = self.input.param("graceful", False)
     self.recoveryType = self.input.param("recoveryType", "full")
     self.skip_buckets = self.input.param("skip_buckets", False)
     self.lww_new = self.input.param("lww_new", False)
     self.skip_consistency = self.input.param("skip_consistency", False)
     self.master_services = self.get_services([self.backupset.cluster_host],
                                              self.services_init,
                                              start_node=0)
     if not self.master_services:
         self.master_services = ["kv"]
     self.per_node = self.input.param("per_node", True)
     if not os.path.exists(self.backup_validation_files_location):
         os.mkdir(self.backup_validation_files_location)
     self.total_buckets = len(self.buckets)
     self.replace_ttl = self.input.param("replace-ttl", None)
     self.replace_ttl_with = self.input.param("replace-ttl-with", None)
     self.verify_before_expired = self.input.param("verify-before-expired",
                                                   False)
     self.vbucket_filter = self.input.param("vbucket-filter", None)
     self.new_replicas = self.input.param("new-replicas", None)
     self.should_fail = self.input.param("should-fail", False)
     self.restore_compression_mode = self.input.param(
         "restore-compression-mode", None)
     self.enable_firewall = False
     self.vbuckets_filter_no_data = False
     self.test_fts = self.input.param("test_fts", False)
     self.restore_should_fail = self.input.param("restore_should_fail",
                                                 False)
Example #19
 def test_xdcr_and_indexing_with_eventing(self):
     rest_src = RestConnection(self.servers[0])
     rest_dst = RestConnection(self.servers[2])
     self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
     self.n1ql_helper = N1QLHelper(shell=self.shell,
                                   max_verify=self.max_verify,
                                   buckets=self.buckets,
                                   item_flag=self.item_flag,
                                   n1ql_port=self.n1ql_port,
                                   full_docs_list=self.full_docs_list,
                                   log=self.log,
                                   input=self.input,
                                   master=self.master,
                                   use_rest=True)
     self.n1ql_helper.create_primary_index(using_gsi=True,
                                           server=self.n1ql_node)
     try:
         rest_src.remove_all_replications()
         rest_src.remove_all_remote_clusters()
         rest_src.add_remote_cluster(self.servers[2].ip,
                                     self.servers[2].port,
                                     self.servers[0].rest_username,
                                     self.servers[0].rest_password, "C2")
         rest_dst.create_bucket(bucket=self.src_bucket_name, ramQuotaMB=100)
         self.sleep(30)
         # setup xdcr relationship
         repl_id = rest_src.start_replication('continuous',
                                              self.src_bucket_name, "C2")
         if repl_id is not None:
             self.log.info("Replication created successfully")
         self.load(self.gens_load,
                   buckets=self.src_bucket,
                   flag=self.item_flag,
                   verify_data=False,
                   batch_size=self.batch_size)
         body = self.create_save_function_body(
             self.function_name, HANDLER_CODE.DELETE_BUCKET_OP_ON_DELETE)
         # deploy function
         self.deploy_function(body)
         # Wait for eventing to catch up with all the update mutations and verify results
         self.verify_eventing_results(self.function_name,
                                      self.docs_per_day * 2016)
         stats_xdcr_dst = rest_dst.get_bucket_stats(self.src_bucket_name)
         index_bucket_map = self.n1ql_helper.get_index_count_using_primary_index(
             self.buckets, self.n1ql_node)
         actual_count = index_bucket_map[self.src_bucket_name]
         log.info("No of docs in xdcr destination bucket : {0}".format(
             stats_xdcr_dst["curr_items"]))
         log.info("No of docs indexed by primary index: {0}".format(
             actual_count))
         if stats_xdcr_dst["curr_items"] != self.docs_per_day * 2016:
             self.fail(
                 "xdcr did not replicate all documents, actual : {0} expected : {1}"
                 .format(stats_xdcr_dst["curr_items"],
                         self.docs_per_day * 2016))
         if actual_count != self.docs_per_day * 2016:
             self.fail(
                 "Not all the items were indexed, actual : {0} expected : {1}"
                 .format(actual_count, self.docs_per_day * 2016))
         # delete all documents
         self.load(self.gens_load,
                   buckets=self.src_bucket,
                   flag=self.item_flag,
                   verify_data=False,
                   batch_size=self.batch_size,
                   op_type='delete')
         # Wait for eventing to catch up with all the delete mutations and verify results
         self.verify_eventing_results(self.function_name,
                                      0,
                                      skip_stats_validation=True)
         stats_xdcr_dst = rest_dst.get_bucket_stats(self.src_bucket_name)
         index_bucket_map = self.n1ql_helper.get_index_count_using_primary_index(
             self.buckets, self.n1ql_node)
         actual_count = index_bucket_map[self.src_bucket_name]
         log.info("No of docs in xdcr destination bucket : {0}".format(
             stats_xdcr_dst["curr_items"]))
         log.info("No of docs indexed by primary index: {0}".format(
             actual_count))
         if stats_xdcr_dst["curr_items"] != 0:
             self.fail(
                 "xdcr did not replicate all documents, actual : {0} expected : {1}"
                 .format(stats_xdcr_dst["curr_items"], 0))
         if actual_count != 0:
             self.fail(
                 "Not all the items were indexed, actual : {0} expected : {1}"
                 .format(actual_count, 0))
         self.undeploy_and_delete_function(body)
     finally:
         self.n1ql_helper.drop_primary_index(using_gsi=True,
                                             server=self.n1ql_node)
         rest_dst.delete_bucket()
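The XDCR example above performs the same pair of checks twice: compare the destination bucket's curr_items and the primary-index document count against an expected value. A compact sketch of that verification as a reusable helper, assuming the same test object, rest_dst connection and n1ql_helper built in the example:

 # Sketch: the repeated replication/indexing verification from the test above.
 def assert_replicated_and_indexed(test, rest_dst, expected):
     stats_xdcr_dst = rest_dst.get_bucket_stats(test.src_bucket_name)
     index_bucket_map = test.n1ql_helper.get_index_count_using_primary_index(
         test.buckets, test.n1ql_node)
     actual_count = index_bucket_map[test.src_bucket_name]
     test.log.info("No of docs in xdcr destination bucket : {0}".format(
         stats_xdcr_dst["curr_items"]))
     test.log.info("No of docs indexed by primary index: {0}".format(actual_count))
     if stats_xdcr_dst["curr_items"] != expected:
         test.fail("xdcr did not replicate all documents, actual : {0} expected : {1}"
                   .format(stats_xdcr_dst["curr_items"], expected))
     if actual_count != expected:
         test.fail("Not all the items were indexed, actual : {0} expected : {1}"
                   .format(actual_count, expected))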