Example #1
 def print_eventing_stats_from_all_eventing_nodes(self):
     eventing_nodes = self.cluster_util.get_nodes_from_services_map(service_type="eventing", get_all_nodes=True)
     for eventing_node in eventing_nodes:
         rest_conn = EventingHelper(eventing_node)
         out = rest_conn.get_all_eventing_stats()
         log.info("Stats for Node {0} is \n{1} ".format(eventing_node.ip, json.dumps(out, sort_keys=True,
                                                                                   indent=4)))
Example #2
 def cleanup_eventing(self):
     ev_node = self.cluster_util.get_nodes_from_services_map(
         cluster=self.cluster,
         service_type=CbServer.Services.EVENTING,
         get_all_nodes=False)
     ev_rest = EventingHelper(ev_node)
     self.log.info("Running eventing cleanup api...")
     ev_rest.cleanup_eventing()
Example #3
 def print_go_routine_dump_from_all_eventing_nodes(self):
     eventing_nodes = self.cluster_util.get_nodes_from_services_map(service_type="eventing", get_all_nodes=True)
     for eventing_node in eventing_nodes:
         rest_conn = EventingHelper(eventing_node)
         out = rest_conn.get_eventing_go_routine_dumps()
         self.log.info("Go routine dumps for Node {0} is \n{1} ======================================================"
                  "============================================================================================="
                  "\n\n".format(eventing_node.ip, out))
Example #4
 def print_go_routine_dump_from_all_eventing_nodes(self):
     for eventing_node in self.eventing_nodes:
         rest_conn = EventingHelper(eventing_node)
         out = rest_conn.get_eventing_go_routine_dumps()
         self.log.info(
             "Go routine dumps for Node {0} is \n{1} ======================================================"
             "============================================================================================="
             "\n\n".format(eventing_node.ip, out))
Example #5
 def __init__(self,
              master,
              eventing_nodes,
              src_bucket_name='src_bucket',
              dst_bucket_name='dst_bucket',
              metadata_bucket_name='metadata',
              dst_bucket_name1='dst_bucket_name1',
              eventing_log_level='INFO',
              use_memory_manager=True,
              timer_storage_chan_size=10000,
              dcp_gen_chan_size=10000,
              is_sbm=False,
              is_curl=False,
              hostname='https://postman-echo.com/',
              auth_type='no-auth',
              curl_username=None,
              curl_password=None,
              cookies=False,
              print_eventing_handler_code_in_logs=True):
     self.log = logging.getLogger("test")
     self.eventing_nodes = eventing_nodes
     self.master = master
     self.eventing_helper = EventingHelper(self.eventing_nodes[0])
     self.src_bucket_name = src_bucket_name
     self.dst_bucket_name = dst_bucket_name
     self.metadata_bucket_name = metadata_bucket_name
     self.dst_bucket_name1 = dst_bucket_name1
     self.eventing_log_level = eventing_log_level
     self.use_memory_manager = use_memory_manager
     self.timer_storage_chan_size = timer_storage_chan_size
     self.dcp_gen_chan_size = dcp_gen_chan_size
     self.is_sbm = is_sbm
     self.is_curl = is_curl
     self.hostname = hostname
     self.auth_type = auth_type
     self.curl_username = curl_username
     self.curl_password = curl_password
     self.cookies = cookies
     self.bucket_helper = BucketHelper(self.master)
     self.print_eventing_handler_code_in_logs = print_eventing_handler_code_in_logs
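A minimal usage sketch for this constructor; the class name EventingFunctionHelper is illustrative (the definition above does not show it), master and eventing_nodes come from the cluster utilities used in the other examples, and every other argument falls back to the defaults in the signature:

 eventing_nodes = self.cluster_util.get_nodes_from_services_map(
     cluster=self.cluster,
     service_type=CbServer.Services.EVENTING,
     get_all_nodes=True)
 helper = EventingFunctionHelper(master=self.cluster.master,
                                 eventing_nodes=eventing_nodes)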
Example #6
 def verify_eventing_results(self,
                             name,
                             expected_dcp_mutations,
                             doc_timer_events=False,
                             on_delete=False,
                             skip_stats_validation=False,
                             bucket=None,
                             timeout=600):
     # Reset the rest server, since the previously used one might have
     # left the cluster due to a rebalance
     num_nodes = self.refresh_rest_server()
     eventing_nodes = self.cluster_util.get_nodes_from_services_map(
         cluster=self.cluster,
         service_type=CbServer.Services.EVENTING,
         get_all_nodes=True)
     if bucket is None:
         bucket = self.dst_bucket_name
     if self.is_sbm:
         bucket = self.src_bucket_name
     if not skip_stats_validation:
         # We can't rely on dcp_mutation stats when doc timer events are set.
         # TODO : add this back when getEventProcessingStats works reliably for doc timer events as well
         if not doc_timer_events:
             count = 0
             if num_nodes <= 1:
                 stats = self.eventing_helper.get_event_processing_stats(
                     name)
             else:
                 stats = self.eventing_helper.get_aggregate_event_processing_stats(
                     name)
             if on_delete:
                 mutation_type = "dcp_deletion"
             else:
                 mutation_type = "dcp_mutation"
             actual_dcp_mutations = stats[mutation_type]
             # When binary data is involved, eventing processes the
             # DCP_MUTATIONS but ignores them, so the dcp_mutation count
             # can lag behind the expected value.
             # Wait for the eventing nodes to process all dcp mutations
             self.log.info("Number of {0} processed till now : {1}".format(
                 mutation_type, actual_dcp_mutations))
             while actual_dcp_mutations != expected_dcp_mutations and count < 20:
                 self.sleep(
                     timeout / 20,
                     message=
                     "Waiting for eventing to process all dcp mutations...")
                 count += 1
                 if num_nodes <= 1:
                     stats = self.eventing_helper.get_event_processing_stats(
                         name)
                 else:
                     stats = self.eventing_helper.get_aggregate_event_processing_stats(
                         name)
                 actual_dcp_mutations = stats[mutation_type]
                 self.log.info(
                     "Number of {0} processed till now : {1}".format(
                         mutation_type, actual_dcp_mutations))
             if count == 20:
                 raise Exception(
                     "Eventing has not processed all the {0}. Current : {1} Expected : {2}"
                     .format(mutation_type, actual_dcp_mutations,
                             expected_dcp_mutations))
     # Wait for bucket operations to complete and verify they went through successfully
     count = 0
     stats_dst = self.bucket_helper.get_bucket_stats(bucket)
     while stats_dst["curr_items"] != expected_dcp_mutations and count < 20:
         message = "Waiting for handler code {2} to complete bucket operations... Current : {0} Expected : {1}".\
                   format(stats_dst["curr_items"], expected_dcp_mutations,name)
         self.sleep(timeout / 20, message=message)
         curr_items = stats_dst["curr_items"]
         stats_dst = self.bucket_helper.get_bucket_stats(bucket)
         if curr_items == stats_dst["curr_items"]:
             count += 1
         else:
             count = 0
     try:
         stats_src = self.bucket_helper.get_bucket_stats(
             self.src_bucket_name)
         self.log.info("Documents in source bucket : {}".format(
             stats_src["curr_items"]))
     except Exception:
         pass
     if stats_dst["curr_items"] != expected_dcp_mutations:
         total_dcp_backlog = 0
         timers_in_past = 0
         lcb = {}
         # TODO : Use the following stats in a meaningful way going forward. Just printing them for debugging.
         for eventing_node in eventing_nodes:
             rest_conn = EventingHelper(eventing_node)
             out = rest_conn.get_all_eventing_stats()
             total_dcp_backlog += out[0]["events_remaining"]["dcp_backlog"]
             if "TIMERS_IN_PAST" in out[0]["event_processing_stats"]:
                 timers_in_past += out[0]["event_processing_stats"][
                     "TIMERS_IN_PAST"]
             total_lcb_exceptions = out[0]["lcb_exception_stats"]
             host = eventing_node.ip
             lcb[host] = total_lcb_exceptions
             full_out = rest_conn.get_all_eventing_stats(
                 seqs_processed=True)
             self.log.debug("Stats for Node {0} is \n{1} ".format(
                 eventing_node.ip, json.dumps(out, sort_keys=True,
                                              indent=4)))
             self.log.debug("Full Stats for Node {0} is \n{1} ".format(
                 eventing_node.ip,
                 json.dumps(full_out, sort_keys=True, indent=4)))
         raise Exception(
             "Bucket operations from handler code took lot of time to complete or didn't go through. Current : {0} "
             "Expected : {1}  dcp_backlog : {2}  TIMERS_IN_PAST : {3} lcb_exceptions : {4}"
             .format(stats_dst["curr_items"], expected_dcp_mutations,
                     total_dcp_backlog, timers_in_past, lcb))
     self.log.info(
         "Final docs count... Current : {0} Expected : {1}".format(
             stats_dst["curr_items"], expected_dcp_mutations))
     # TODO : Use the following stats in a meaningful way going forward. Just printing them for debugging.
     # print all stats from all eventing nodes
     # These are the stats that will be used by ns_server and UI
     for eventing_node in eventing_nodes:
         rest_conn = EventingHelper(eventing_node)
         out = rest_conn.get_all_eventing_stats()
         full_out = rest_conn.get_all_eventing_stats(seqs_processed=True)
         self.log.debug("Stats for Node {0} is \n{1} ".format(
             eventing_node.ip, json.dumps(out, sort_keys=True, indent=4)))
         self.log.debug("Full Stats for Node {0} is \n{1} ".format(
             eventing_node.ip, json.dumps(full_out,
                                          sort_keys=True,
                                          indent=4)))
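Note that the bucket-stat loop in verify_eventing_results only advances its retry counter when curr_items stops moving between polls, so a slow but still progressing handler is not timed out early. A standalone, hedged sketch of that stall-detection idiom (names and timing values are illustrative, not part of the framework):

 import time

 def wait_until_stable(get_count, expected, retries=20, interval=30):
     # Re-poll a counter; an attempt only counts as "stalled" when the
     # value did not change between two consecutive polls, mirroring the
     # curr_items loop in Example #6.
     count = 0
     current = get_count()
     while current != expected and count < retries:
         time.sleep(interval)
         previous = current
         current = get_count()
         count = count + 1 if current == previous else 0
     return current == expected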
Example #7
 def setUp(self):
     super(EventingBaseTest, self).setUp()
     self.bucket_helper = BucketHelper(self.cluster.master)
     self.input = TestInputSingleton.input
     self.services_init = self.input.param("services_init", None)
     self.services = self.cluster_util.get_services(self.cluster.servers,
                                                    self.services_init)
     # rebalance all nodes into the cluster before each test
     if self.num_servers > 1:
         self.services = self.cluster_util.get_services(
             self.cluster.servers[:self.nodes_init], self.services_init)
         """ if there is not node service in ini file, kv needs to be added in
             to avoid exception when add node """
         if self.services is not None and int(self.nodes_init) - len(
                 self.services) > 0:
             for i in range(0, int(self.nodes_init) - len(self.services)):
                 self.services.append("kv")
         self.task.rebalance(self.servers[:1],
                             self.servers[1:self.nodes_init], [],
                             services=self.services)
     self.input.test_params.update({"default_bucket": False})
     self.master = self.servers[0]
     self.server = self.master
     self.restServer = self.cluster_util.get_nodes_from_services_map(
         cluster=self.cluster, service_type=CbServer.Services.EVENTING)
     self.rest = RestConnection(self.restServer)
     self.eventing_helper = EventingHelper(self.restServer)
     self.rest.set_indexer_storage_mode()
     self.log.info(
         "Setting the min possible memory quota so that adding mode nodes to the cluster wouldn't be a problem."
     )
     self.rest.set_service_mem_quota({
         CbServer.Settings.KV_MEM_QUOTA:
         330,
         CbServer.Settings.INDEX_MEM_QUOTA:
         CbServer.Settings.MinRAMQuota.INDEX,
         CbServer.Settings.EVENTING_MEM_QUOTA:
         CbServer.Settings.MinRAMQuota.EVENTING
     })
     self.src_bucket_name = self.input.param('src_bucket_name',
                                             'src_bucket')
     self.eventing_log_level = self.input.param('eventing_log_level',
                                                'INFO')
     self.dst_bucket_name = self.input.param('dst_bucket_name',
                                             'dst_bucket')
     self.dst_bucket_name1 = self.input.param('dst_bucket_name1',
                                              'dst_bucket1')
     self.metadata_bucket_name = self.input.param('metadata_bucket_name',
                                                  'metadata')
     self.create_functions_buckets = self.input.param(
         'create_functions_buckets', True)
     self.docs_per_day = self.input.param("doc-per-day", 1)
     self.use_memory_manager = self.input.param('use_memory_manager', True)
     self.print_eventing_handler_code_in_logs = self.input.param(
         'print_eventing_handler_code_in_logs', True)
     # Seed from the system time; passing the datetime.time class object
     # to random.seed() is a TypeError on modern Python 3
     random.seed()
     function_name = "Function_{0}_{1}".format(
         random.randint(1, 1000000000), self._testMethodName)
     self.function_name = function_name[0:90]
     self.timer_storage_chan_size = self.input.param(
         'timer_storage_chan_size', 10000)
     self.dcp_gen_chan_size = self.input.param('dcp_gen_chan_size', 10000)
     self.is_sbm = self.input.param('source_bucket_mutation', False)
     self.pause_resume = self.input.param('pause_resume', False)
     self.pause_resume_number = self.input.param('pause_resume_number', 1)
     self.is_curl = self.input.param('curl', False)
     self.hostname = self.input.param('host', 'https://postman-echo.com/')
     self.curl_username = self.input.param('curl_user', None)
     self.curl_password = self.input.param('curl_password', None)
     self.auth_type = self.input.param('auth_type', 'no-auth')
     self.bearer_key = self.input.param('bearer_key', '')
     self.url = self.input.param('path', None)
     self.cookies = self.input.param('cookies', False)
     # # Define Helper Method which will be used for running n1ql queries, create index, drop index
     # self.version = self.input.param("cbq_version", "git_repo")
     # if self.input.tuq_client and "client" in self.input.tuq_client:
     #     self.shell = RemoteMachineShellConnection(self.input.tuq_client["client"])
     # else:
     #     self.shell = RemoteMachineShellConnection(self.cluster.master)
     # self.use_rest = self.input.param("use_rest", True)
     # self.max_verify = self.input.param("max_verify", None)
     # self.n1ql_port = self.input.param("n1ql_port", 8093)
     # self.full_docs_list = self.generate_full_docs_list(self.gens_load)
     # self.n1ql_helper = N1QLHelper(
     #     version=self.version, shell=self.shell,
     #     use_rest=self.use_rest, max_verify=self.max_verify,
     #     buckets=self.cluster.buckets, item_flag=self.item_flag,
     #     n1ql_port=self.n1ql_port, full_docs_list=self.full_docs_list,
     #     log=self.log, input=self.input, master=self.cluster.master)
     self.n1ql_node = self.cluster_util.get_nodes_from_services_map(
         cluster=self.cluster, service_type=CbServer.Services.N1QL)
     if self.hostname == 'local':
         self.insall_dependencies()
         s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
         s.connect(("8.8.8.8", 80))
         ip = s.getsockname()[0]
         s.close()
         self.hostname = "http://" + ip + ":1080/"
         self.log.info("local ip address:{}".format(self.hostname))
         self.setup_curl()
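The hostname == 'local' branch above discovers the machine's outbound IP by calling connect() on a UDP socket aimed at a public address; with SOCK_DGRAM no packets are sent, the call merely binds the socket to the interface that routes there. A self-contained sketch of the same idiom (the function name is illustrative):

 import socket

 def get_outbound_ip():
     # connect() on a UDP socket performs no handshake; it only selects
     # the local interface that would route to 8.8.8.8, whose address
     # getsockname() then reports - the same trick as in setUp above.
     s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
     try:
         s.connect(("8.8.8.8", 80))
         return s.getsockname()[0]
     finally:
         s.close()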
Example #8
 def print_eventing_stats_from_all_eventing_nodes(self):
     for eventing_node in self.eventing_nodes:
         rest_conn = EventingHelper(eventing_node)
         out = rest_conn.get_all_eventing_stats()
         self.log.info("Stats for Node {0} is \n{1} ".format(
             eventing_node.ip, json.dumps(out, sort_keys=True, indent=4)))
Example #9
 def cleanup_eventing(self):
     ev_rest = EventingHelper(self.eventing_nodes[0])
     self.log.info("Running eventing cleanup api...")
     ev_rest.cleanup_eventing()