Example #1
    def load_sample_buckets(self, server, bucketName, total_items=None):
        """
        Load the specified sample bucket in Couchbase
        """
        self.rest.load_sample(bucketName)
        BucketOperationHelper.wait_for_memcached(self.master, bucketName)
        """ check for load data into travel-sample bucket """
        if total_items:
            import time
            end_time = time.time() + 300
            while time.time() < end_time:
                self.sleep(10)
                num_actual = self.get_item_count(self.master, bucketName)
                if int(num_actual) == total_items:
                    self.log.info("%s items are loaded in the %s bucket" %
                                  (num_actual, bucketName))
                    break

            if int(num_actual) != total_items:
                return False
        else:
            self.sleep(120)

        return True
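
For reference, a minimal, hypothetical call site for load_sample_buckets might
look like the sketch below. The expected item count for a sample bucket varies
by Couchbase release, so the number here is a placeholder, not a fact:

    # Hypothetical test method; assumes the same base class as Example #1.
    def test_travel_sample_load(self):
        expected_items = 31591  # assumption: check the count for your release
        loaded = self.load_sample_buckets(self.master, "travel-sample",
                                          total_items=expected_items)
        self.assertTrue(loaded, "travel-sample did not reach the expected count")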
Example #2
 def tear_down_buckets(self):
     self.log.info("tearing down bucket")
     BucketOperationHelper.delete_all_buckets_or_assert(
         self.input.servers, self)
     self.log.info("bucket teared down")
Example #3
 def tear_down_buckets(self):
     self.log.info("tearing down bucket")
     BucketOperationHelper.delete_all_buckets_or_assert(self.input.servers,
                                                        self)
     self.log.info("bucket teared down")
Example #4
 def tear_down_buckets(self):
     print "[perf.tearDown] Tearing down bucket"
     BucketOperationHelper.delete_all_buckets_or_assert(self.input.servers,
                                                        self)
     print "[perf.tearDown] Bucket teared down"
Example #5
 def tear_down_buckets(self):
     print "[perf.tearDown] Tearing down bucket"
     BucketOperationHelper.delete_all_buckets_or_assert(
         self.input.servers, self)
     print "[perf.tearDown] Bucket teared down"
Example #6
    def test_rqg_concurrent_new(self):
        # Get Data Map
        table_list = self.client._get_table_list()
        table_map = self.client._get_values_with_type_for_fields_in_table()
        if self.remove_alias:
            for key in list(table_map.keys()):
                if "alias_name" in list(table_map[key].keys()):
                    table_map[key].pop("alias_name", None)
        check = True
        failure_map = {}
        batches = queue.Queue()
        batch = []
        test_case_number = 1
        count = 1
        inserted_count = 0
        self.use_secondary_index = self.run_query_with_secondary or self.run_explain_with_hints
        # Load All the templates
        self.test_file_path = self.unzip_template(self.test_file_path)
        with open(self.test_file_path) as f:
            query_list = f.readlines()
        if self.total_queries is None:
            self.total_queries = len(query_list)
        for n1ql_query_info in query_list:
            data = n1ql_query_info
            batch.append({str(test_case_number): data})
            if count == self.concurreny_count:
                inserted_count += len(batch)
                batches.put(batch)
                count = 1
                batch = []
            else:
                count += 1
            test_case_number += 1
            if test_case_number > self.total_queries:
                break
        if inserted_count != len(query_list):
            batches.put(batch)
        result_queue = queue.Queue()
        input_queue = queue.Queue()
        failure_record_queue = queue.Queue()
        # Run Test Batches
        test_case_number = 1
        thread_list = []
        start_test_case_number = 1
        table_queue_map = {}
        for table_name in table_list:
            table_queue_map[table_name] = queue.Queue()
        self.log.info("CREATE BACTHES")
        while not batches.empty():
            # Distribute query batches across the per-table work queues
            for table_name in table_list:
                if batches.empty():
                    break
                test_batch = batches.get()

                # Avoid shadowing the list builtin, which the comprehension
                # itself needs (the original assignment to a name called list
                # raises a NameError here)
                query_batch = [data[list(data.keys())[0]] for data in test_batch]
                table_queue_map[table_name].put({
                    "table_name": table_name,
                    "table_map": table_map,
                    "list": query_batch,
                    "start_test_case_number": start_test_case_number
                })
                start_test_case_number += len(query_batch)
        self.log.info("SPAWNING THREADS")
        for table_name in table_list:
            t = threading.Thread(target=self._testrun_worker_new,
                                 args=(table_queue_map[table_name],
                                       result_queue, failure_record_queue))
            t.daemon = True
            t.start()
            thread_list.append(t)
        # Wait for all worker threads to finish
        for t in thread_list:
            t.join()

        if self.drop_bucket:
            for bucket in self.buckets:
                BucketOperationHelper.delete_bucket_or_assert(
                    serverInfo=self.master, bucket=bucket)
        # Analyze the results for the failure and assert on the run
        success, summary, result = self._test_result_analysis(result_queue)
        self.log.info(result)
        # self.dump_failure_data(failure_record_queue)
        self.assertTrue(success, summary)
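
Stripped of the RQG specifics, the concurrency pattern in Example #6 is: chunk
the work into fixed-size batches, park the batches on queue.Queue instances,
and let daemon threads drain them. A self-contained sketch of just that
pattern (all names below are mine, not from the test suite):

    import queue
    import threading

    def run_in_batches(items, batch_size, num_workers, handle_batch):
        # Split the work into fixed-size batches and park them on a queue
        work, results = queue.Queue(), queue.Queue()
        for i in range(0, len(items), batch_size):
            work.put(items[i:i + batch_size])

        def worker():
            # Drain batches until the queue is empty, then exit
            while True:
                try:
                    batch = work.get_nowait()
                except queue.Empty:
                    return
                results.put(handle_batch(batch))

        threads = [threading.Thread(target=worker, daemon=True)
                   for _ in range(num_workers)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        return [results.get() for _ in range(results.qsize())]

    # Sums of 1..10 in batches of three, processed by two workers
    print(run_in_batches(list(range(1, 11)), 3, 2, sum))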
Example #7
    def test_rqg_concurrent(self):
        # Get Data Map
        table_map = self.client._get_values_with_type_for_fields_in_table()
        check = True
        failure_map = {}
        batches = []
        batch = []
        test_case_number = 1
        count = 1
        inserted_count = 0
        # Load All the templates
        self.test_file_path = self.unzip_template(self.test_file_path)
        with open(self.test_file_path) as f:
            query_list = f.readlines()
        if self.total_queries is None:
            self.total_queries = len(query_list)
        for n1ql_query_info in query_list:
            data = n1ql_query_info
            batch.append({str(test_case_number): data})
            if count == self.concurreny_count:
                inserted_count += len(batch)
                batches.append(batch)
                count = 1
                batch = []
            else:
                count += 1
            test_case_number += 1
            if test_case_number > self.total_queries:
                break
        if inserted_count != len(query_list):
            batches.append(batch)
        result_queue = queue.Queue()
        input_queue = queue.Queue()
        failure_record_queue = queue.Queue()
        # Run Test Batches
        test_case_number = 1
        thread_list = []
        for i in range(self.concurreny_count):
            t = threading.Thread(target=self._testrun_worker,
                                 args=(input_queue, result_queue,
                                       failure_record_queue))
            t.daemon = True
            t.start()
            thread_list.append(t)
        for test_batch in batches:
            # Convert each raw template in the batch into runnable queries;
            # as in Example #6, use a name other than list so the builtin
            # stays available inside the comprehension
            query_batch = [data[list(data.keys())[0]] for data in test_batch]
            query_batch = self.client._convert_template_query_info(
                table_map=table_map,
                n1ql_queries=query_batch,
                ansi_joins=self.ansi_joins,
                gen_expected_result=False)

            # Queue each converted test case for the worker threads
            for test_case in query_batch:
                test_case_input = test_case
                input_queue.put({
                    "test_case_number": test_case_number,
                    "test_data": test_case_input
                })
                test_case_number += 1
            # Capture the results when done
            check = False
        for t in thread_list:
            t.join()

        # Delete the buckets once, after all workers have finished (the
        # original nested this inside the join loop, deleting per thread)
        for bucket in self.buckets:
            BucketOperationHelper.delete_bucket_or_assert(
                serverInfo=self.master, bucket=bucket)
        # Analyze the results for the failure and assert on the run
        success, summary, result = self._test_result_analysis(result_queue)
        self.log.info(result)
        # self.dump_failure_data(failure_record_queue)
        self.assertTrue(success, summary)
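
One subtlety in Example #7: t.join() only returns if _testrun_worker exits
once the input queue is drained, which the snippet does not show. A common way
to make that explicit is one sentinel per worker; a minimal sketch under that
assumption (the squaring is a stand-in for running one test case):

    import queue
    import threading

    def worker(input_queue, result_queue):
        # Consume items until a None sentinel says to stop
        while True:
            item = input_queue.get()
            if item is None:
                break
            result_queue.put(item * item)  # stand-in for one test case

    input_q, result_q = queue.Queue(), queue.Queue()
    threads = [threading.Thread(target=worker, args=(input_q, result_q),
                                daemon=True) for _ in range(4)]
    for t in threads:
        t.start()
    for n in range(10):
        input_q.put(n)
    for _ in threads:
        input_q.put(None)  # one sentinel per worker
    for t in threads:
        t.join()
    print(sorted(result_q.get() for _ in range(result_q.qsize())))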