Code example #1
    def collectinfo_test(self):
        """We use cbcollect_info to automatically collect the logs for server node

        First we load some items to the node. Optionally you can do some mutation
        against these items. Then we use cbcollect_info the automatically generate
        the zip file containing all the logs about the node. We want to verify we have
        all the log files according to the LOG_FILE_NAME_LIST and in stats.log, we have
        stats for all the buckets we have created"""

        gen_load = BlobGenerator('nosql',
                                 'nosql-',
                                 self.value_size,
                                 end=self.num_items)
        gen_update = BlobGenerator('nosql',
                                   'nosql-',
                                   self.value_size,
                                   end=(self.num_items / 2 - 1))
        gen_expire = BlobGenerator('nosql',
                                   'nosql-',
                                   self.value_size,
                                   start=self.num_items / 2,
                                   end=(self.num_items * 3 / 4 - 1))
        gen_delete = BlobGenerator('nosql',
                                   'nosql-',
                                   self.value_size,
                                   start=self.num_items * 3 / 4,
                                   end=self.num_items)
        self._load_all_buckets(self.master, gen_load, "create", 0)

        if (self.doc_ops is not None):
            if ("update" in self.doc_ops):
                self._load_all_buckets(self.master, gen_update, "update", 0)
            if ("delete" in self.doc_ops):
                self._load_all_buckets(self.master, gen_delete, "delete", 0)
            if ("expire" in self.doc_ops):
                self._load_all_buckets(self.master, gen_expire, "update",
                                       self.expire_time)
                time.sleep(self.expire_time + 1)
        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])

        self.shell.delete_files("%s.zip" % (self.log_filename))
        self.shell.delete_files(
            "cbcollect_info*"
        )  # This is the folder generated after unzipping the log package
        output, error = self.shell.execute_cbcollect_info("%s.zip" %
                                                          (self.log_filename))
        info = self.shell.extract_remote_info()
        os_type = info.type.lower()
        if os_type != "windows":
            if len(error) > 0:
                raise Exception(
                    "Command threw an error message. Please check the output of remote_util"
                )
            for output_line in output:
                if output_line.find("ERROR") >= 0 or output_line.find(
                        "Error") >= 0:
                    raise Exception(
                        "Command threw an error message. Please check the output of remote_util"
                    )
        self.verify_results(self.log_filename)
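The four generators above split the keyspace into fixed slices, so the create, update, expire, and delete operations each target a predictable key range. A minimal sketch of the assumed ranges (num_items = 1000 is hypothetical, the end bound is assumed to be exclusive, and // is used so the sketch runs under both Python 2 and 3):

    # Hypothetical illustration of the key ranges produced above.
    num_items = 1000
    print("create: [%d, %d)" % (0, num_items))                            # all items
    print("update: [%d, %d)" % (0, num_items // 2 - 1))                   # first half
    print("expire: [%d, %d)" % (num_items // 2, num_items * 3 // 4 - 1))  # third quarter
    print("delete: [%d, %d)" % (num_items * 3 // 4, num_items))           # last quarter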
Code example #2
File: rebalanceout.py  Project: ronniedada/testrunner
 def rebalance_out_with_ops(self):
     gen_delete = BlobGenerator('mike',
                                'mike-',
                                self.value_size,
                                start=self.num_items / 2,
                                end=self.num_items)
     gen_create = BlobGenerator('mike',
                                'mike-',
                                self.value_size,
                                start=self.num_items + 1,
                                end=self.num_items * 3 / 2)
     servs_out = [
         self.servers[self.num_servers - i - 1]
         for i in range(self.nodes_out)
     ]
     rebalance = self.cluster.async_rebalance(self.servers[:1], [],
                                              servs_out)
     # define which doc ops will be performed during rebalancing;
     # multiple ops are allowed, executed one at a time
     if (self.doc_ops is not None):
         if ("update" in self.doc_ops):
             self._load_all_buckets(self.master, self.gen_update, "update",
                                    0)
         if ("create" in self.doc_ops):
             self._load_all_buckets(self.master, gen_create, "create", 0)
         if ("delete" in self.doc_ops):
             self._load_all_buckets(self.master, gen_delete, "delete", 0)
     rebalance.result()
     self._wait_for_stats_all_buckets(self.servers[:self.num_servers -
                                                   self.nodes_out])
     self._verify_all_buckets(self.master, max_verify=self.max_verify)
     self._verify_stats_all_buckets(self.servers[:self.num_servers -
                                                 self.nodes_out])
Code example #3
    def setUp(self):
        super(RebalanceBaseTest, self).setUp()
        self.value_size = self.input.param("value_size", 256)
        self.doc_ops = self.input.param("doc_ops", None)
        if self.doc_ops is not None:
            self.doc_ops = self.doc_ops.split(";")
        self.default_map_func = "function (doc) {\n  emit(doc._id, doc);\n}"
        self.default_view_name = "default_view"
        self.default_view = View(self.default_view_name, self.default_map_func,
                                 None)

        # define the data that will be used in the test
        self.blob_generator = self.input.param("blob_generator", True)
        if self.blob_generator:
            # gen_load is used to upload data before each test (1000 items by default)
            self.gen_load = BlobGenerator('mike',
                                          'mike-',
                                          self.value_size,
                                          end=self.num_items)
            # gen_update is used to mutate half of the uploaded data
            self.gen_update = BlobGenerator('mike',
                                            'mike-',
                                            self.value_size,
                                            end=(self.num_items / 2 - 1))
            # upload data before each test
            self._load_all_buckets(self.servers[0], self.gen_load, "create", 0)
        else:
            self._load_doc_data_all_buckets()
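The doc_ops parameter arrives as a single ';'-separated string and is split above; a small illustration of the assumed format (the value shown is hypothetical):

    # Hypothetical doc_ops value as it might come in via the test input params.
    doc_ops = "create;update;delete"
    ops = doc_ops.split(";")    # -> ['create', 'update', 'delete']
    assert "update" in ops      # the "update" branches in these tests would run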
Code example #4
File: simple.py  Project: mschoch/testrunner
 def setUp(self):
     super(ElasticSearchSimpleTests, self).setUp()
     self.value_size = self.input.param("value_size", 256)
     self.nodes_in = self.input.param("nodes_in", 1)
     self.nodes_out = self.input.param("nodes_out", 1)
     self.doc_ops = self.input.param("doc_ops", None)
     if self.doc_ops is not None:
         self.doc_ops = self.doc_ops.split(";")
     # define the data that will be used in the test
     self.blob_generator = self.input.param("blob_generator", True)
     if self.blob_generator:
         # gen_load is used to upload data before each test (1000 items by default)
         self.gen_load = BlobGenerator('mike',
                                       'mike-',
                                       self.value_size,
                                       end=self.num_items)
         # gen_update is used to mutate half of the uploaded data
         self.gen_update = BlobGenerator('mike',
                                         'mike-',
                                         self.value_size,
                                         end=(self.num_items / 2 - 1))
         # upload data before each test
         self._load_all_buckets(self.servers[0], self.gen_load, "create", 0)
     else:
         self._load_doc_data_all_buckets()
     self.log.warning("after setUp")
Code example #5
 def rebalance_in_with_ops(self):
     gen_delete = BlobGenerator('mike',
                                'mike-',
                                self.value_size,
                                start=self.num_items / 2,
                                end=self.num_items)
     gen_create = BlobGenerator('mike',
                                'mike-',
                                self.value_size,
                                start=self.num_items + 1,
                                end=self.num_items * 3 / 2)
     servs_in = [
         self.servers[i + self.nodes_init] for i in range(self.nodes_in)
     ]
     rebalance = self.cluster.async_rebalance(
         self.servers[:self.nodes_init], servs_in, [])
     if (self.doc_ops is not None):
         tasks = []
         # define which doc ops will be performed during rebalancing;
         # multiple ops are allowed, executed one at a time
         if ("update" in self.doc_ops):
             tasks += self._async_load_all_buckets(self.master,
                                                   self.gen_update,
                                                   "update", 0)
         if ("create" in self.doc_ops):
             tasks += self._async_load_all_buckets(self.master, gen_create,
                                                   "create", 0)
         if ("delete" in self.doc_ops):
             tasks += self._async_load_all_buckets(self.master, gen_delete,
                                                   "delete", 0)
         for task in tasks:
             task.result()
     rebalance.result()
     self.verify_cluster_stats(self.servers[:self.nodes_in +
                                            self.nodes_init])
Code example #6
File: simple.py  Project: mschoch/testrunner
 def load_data(self):
     self.log.warning("before simple")
     gen_delete = BlobGenerator('mike',
                                'mike-',
                                self.value_size,
                                start=self.num_items / 2,
                                end=self.num_items)
     gen_create = BlobGenerator('mike',
                                'mike-',
                                self.value_size,
                                start=self.num_items + 1,
                                end=self.num_items * 3 / 2)
     if (self.doc_ops is not None):
         tasks = []
         # define which doc ops will be performed during rebalancing;
         # multiple ops are allowed, executed one at a time
         if ("update" in self.doc_ops):
             tasks += self._async_load_all_buckets(self.master,
                                                   self.gen_update,
                                                   "update", 0)
         if ("create" in self.doc_ops):
             tasks += self._async_load_all_buckets(self.master, gen_create,
                                                   "create", 0)
         if ("delete" in self.doc_ops):
             tasks += self._async_load_all_buckets(self.master, gen_delete,
                                                   "delete", 0)
         for task in tasks:
             task.result()
     self.verify_cluster_stats(self.servers[:1])
     super(ElasticSearchSimpleTests,
           self)._wait_for_elasticsearch(self.servers[:1])
     super(ElasticSearchSimpleTests,
           self)._verify_elasticsearch(self.servers[:1])
     self.log.warning("after simple")
Code example #7
 def rebalance_in_with_ops_batch(self):
     gen_delete = BlobGenerator('mike',
                                'mike-',
                                self.value_size,
                                start=(self.num_items / 2 - 1),
                                end=self.num_items)
     gen_create = BlobGenerator('mike',
                                'mike-',
                                self.value_size,
                                start=self.num_items + 1,
                                end=self.num_items * 3 / 2)
     servs_in = [self.servers[i + 1] for i in range(self.nodes_in)]
     rebalance = self.cluster.async_rebalance(self.servers[:1], servs_in,
                                              [])
     if (self.doc_ops is not None):
         # define which doc ops will be performed during rebalancing;
         # multiple ops are allowed, executed one at a time
         if ("update" in self.doc_ops):
             self._load_all_buckets(self.servers[0],
                                    self.gen_update,
                                    "update",
                                    0,
                                    1,
                                    4294967295,
                                    True,
                                    batch_size=20000,
                                    pause_secs=5,
                                    timeout_secs=180)
         if ("create" in self.doc_ops):
             self._load_all_buckets(self.servers[0],
                                    gen_create,
                                    "create",
                                    0,
                                    1,
                                    4294967295,
                                    True,
                                    batch_size=20000,
                                    pause_secs=5,
                                    timeout_secs=180)
         if ("delete" in self.doc_ops):
             self._load_all_buckets(self.servers[0],
                                    gen_delete,
                                    "delete",
                                    0,
                                    1,
                                    4294967295,
                                    True,
                                    batch_size=20000,
                                    pause_secs=5,
                                    timeout_secs=180)
     rebalance.result()
     self._wait_for_stats_all_buckets(self.servers[:self.nodes_in + 1])
     self._verify_all_buckets(self.master,
                              1,
                              1000,
                              None,
                              only_store_hash=True,
                              batch_size=5000)
     self._verify_stats_all_buckets(self.servers[:self.nodes_in + 1])
Code example #8
 def setUp(self):
     super(Rebalance, self).setUp()
     if self._replication_direction_str in "bidirection":
         self.gen_create2 = BlobGenerator('LoadTwo', 'LoadTwo', self._value_size, end=self._num_items)
         self.gen_delete2 = BlobGenerator('LoadTwo', 'LoadTwo-', self._value_size,
             start=int(self._num_items * float(100 - self._percent_delete) / 100), end=self._num_items)
         self.gen_update2 = BlobGenerator('LoadTwo', 'LoadTwo-', self._value_size, start=0,
             end=int(self._num_items * float(self._percent_update) / 100))
Code example #9
    def setUp(self):
        super(bidirectional, self).setUp()

        self.gen_create2 = BlobGenerator('loadTwo', 'loadTwo', self._value_size, end=self._num_items)
        self.gen_delete2 = BlobGenerator('loadTwo', 'loadTwo-', self._value_size,
            start=int(self._num_items * float(100 - self._percent_delete) / 100), end=self._num_items)
        self.gen_update2 = BlobGenerator('loadTwo', 'loadTwo-', self._value_size, start=0,
            end=int(self._num_items * float(self._percent_update) / 100))
Code example #10
    def healthchecker_test(self):

        gen_load = BlobGenerator('nosql',
                                 'nosql-',
                                 self.value_size,
                                 end=self.num_items)
        gen_update = BlobGenerator('nosql',
                                   'nosql-',
                                   self.value_size,
                                   end=(self.num_items / 2 - 1))
        gen_expire = BlobGenerator('nosql',
                                   'nosql-',
                                   self.value_size,
                                   start=self.num_items / 2,
                                   end=(self.num_items * 3 / 4 - 1))
        gen_delete = BlobGenerator('nosql',
                                   'nosql-',
                                   self.value_size,
                                   start=self.num_items * 3 / 4,
                                   end=self.num_items)
        self._load_all_buckets(self.master, gen_load, "create", 0)

        if (self.doc_ops is not None):
            if ("update" in self.doc_ops):
                self._load_all_buckets(self.master, gen_update, "update", 0)
            if ("delete" in self.doc_ops):
                self._load_all_buckets(self.master, gen_delete, "delete", 0)
            if ("expire" in self.doc_ops):
                self._load_all_buckets(self.master, gen_expire, "update",
                                       self.expire_time)
                time.sleep(self.expire_time + 1)
        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])

        self.shell.delete_files(self.report_folder_name)

        output, error = self.shell.execute_cbhealthchecker(
            self.couchbase_usrname, self.couchbase_password,
            self.command_options)

        if self.os != "windows":
            if len(error) > 0:
                raise Exception(
                    "Command threw an error message. Please check the output of remote_util"
                )
        for output_line in output:
            if output_line.find("ERROR") >= 0 or output_line.find(
                    "Error") >= 0:
                raise Exception(
                    "Command threw an error message. Please check the output of remote_util"
                )
            if output_line.find('Exception launched') >= 0:
                raise Exception(
                    "Python exceptions were raised while executing cbhealthchecker"
                )
        self.verify_results(output)
Code example #11
    def CreateUpdateDeleteExpireDuringBackup(self):
        """Backup the items during mutation on existing items is running.

        We first load amount of items. After that, when we start backup, we begin do mutations on these existing items."""

        gen_load = BlobGenerator('mysql', 'mysql-', self.value_size, end=self.num_items)
        gen_update = BlobGenerator('mysql', 'mysql-', self.value_size, end=(self.num_items / 2 - 1))
        gen_expire = BlobGenerator('mysql', 'mysql-', self.value_size, start=self.num_items / 2, end=(self.num_items * 3 / 4 - 1))
        gen_delete = BlobGenerator('mysql', 'mysql-', self.value_size, start=self.num_items * 3 / 4, end=self.num_items)
        self._load_all_buckets(self.master, gen_load, "create", 0, 1, 0, True, batch_size=20000, pause_secs=5, timeout_secs=180)
        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])

        mutate_threads = []
        if(self.doc_ops is not None):
            if("update" in self.doc_ops):
                mutate_threads.append(self._async_load_all_buckets(self.master, gen_update, "update", 0, 1, 0, True, batch_size=20000))

            if("delete" in self.doc_ops):
                mutate_threads.append(self._async_load_all_buckets(self.master, gen_delete, "delete", 0, 1, 0, True, batch_size=20000))

            if("expire" in self.doc_ops):
                mutate_threads.append(self._async_load_all_buckets(self.master, gen_expire, "update", self.expire_time, 1, 0, True, batch_size=20000))

        first_backup_thread = Thread(target=self.shell.execute_cluster_backup,
                                     name="backup",
                                     args=(self.couchbase_login_info, self.backup_location, self.command_options))
        first_backup_thread.start()
        first_backup_thread.join()

        for t in mutate_threads:
            for task in t:
                task.result()

        kvs_before = {}
        for bucket in self.buckets:
            kvs_before[bucket.name] = bucket.kvs[1]
        self._all_buckets_delete(self.master)
        gc.collect()

        if self.default_bucket:
            self.cluster.create_default_bucket(self.master, self.bucket_size, self.num_replicas)
            self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="", num_replicas=self.num_replicas, bucket_size=self.bucket_size))
        self._create_sasl_buckets(self.master, self.sasl_buckets)
        self._create_standard_buckets(self.master, self.standard_buckets)

        for bucket in self.buckets:
            bucket.kvs[1] = kvs_before[bucket.name]
        del kvs_before
        gc.collect()
        bucket_names = [bucket.name for bucket in self.buckets]
        self.shell.restore_backupFile(self.couchbase_login_info, self.backup_location, bucket_names)
        time.sleep(self.expire_time)  # sleep so the expired items can time out
        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
Code example #12
    def LoadDuringBackup(self):
        """Backup the items during data loading is running.

        We first load a number of items. Then we start backup while loading another amount number of items into
        cluster as "noise" during the backup. During verification, we want to make sure that every item before backup
        starts can be restored correctly."""

        gen_load_backup = BlobGenerator('couchdb', 'couchdb', self.value_size, end=self.backup_items)
        self._load_all_buckets(self.master, gen_load_backup, "create", 0, 2, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
        # store the items loaded before the backup starts in kvstores[2]
        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])

        gen_load = BlobGenerator('mysql', 'mysql-', self.value_size, end=self.num_items)
        data_load_thread = Thread(target=self._load_all_buckets,
                                  name="load_data",
                                  args=(self.master, gen_load, "create", 0, 1, 0, True))
        # store the noise items loaded during the backup in kvstores[1]

        backup_thread = Thread(target=self.shell.execute_cluster_backup,
                               name="backup",
                               args=(self.couchbase_login_info, self.backup_location, self.command_options))

        backup_thread.start()
        data_load_thread.start()
        data_load_thread.join()
        backup_thread.join()
        # TODO: implement a mechanism to check the backup progress so that backup_thread cannot hang
        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])

        kvs_before = {}
        for bucket in self.buckets:
            kvs_before[bucket.name] = bucket.kvs[2]
        self._all_buckets_delete(self.master)
        gc.collect()

        if self.default_bucket:
            self.cluster.create_default_bucket(self.master, self.bucket_size, self.num_replicas)
            self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="", num_replicas=self.num_replicas, bucket_size=self.bucket_size))
        self._create_sasl_buckets(self.master, self.sasl_buckets)
        self._create_standard_buckets(self.master, self.standard_buckets)

        for bucket in self.buckets:
            bucket.kvs[2] = kvs_before[bucket.name]
        del kvs_before
        gc.collect()
        bucket_names = [bucket.name for bucket in self.buckets]
        self.shell.restore_backupFile(self.couchbase_login_info, self.backup_location, bucket_names)

        for bucket in self.buckets:
            del bucket.kvs[1]
        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
        self.verify_results(self.master, 2)  # verify only against kvstores[2]
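The two bookkeeping stores are what let the test verify only the pre-backup items: items loaded before the backup go to kvstores[2], while the mid-backup "noise" goes to kvstores[1] and is dropped before verification. A toy sketch of that idea, with plain dicts standing in for the real KVStore objects (hypothetical keys, not the real API):

    # Toy model of the two-store bookkeeping used above.
    kvstores = {1: {}, 2: {}}
    kvstores[2] = {"couchdb%d" % i: "v" for i in range(3)}  # items to back up
    kvstores[1] = {"mysql-%d" % i: "v" for i in range(3)}   # noise during backup
    del kvstores[1]                     # noise is excluded from verification
    restored = {"couchdb%d" % i: "v" for i in range(3)}     # result of the restore
    assert restored == kvstores[2]      # every pre-backup item must come back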
Code example #13
File: rebalanceout.py  Project: Boggypop/testrunner
 def incremental_rebalance_out_with_ops(self):
     batch_size = 1000
     for i in reversed(range(1, self.num_servers, 2)):
         if i == 1:
             batch_size = 1
         rebalance = self.cluster.async_rebalance(self.servers[:i], [],
                                                  self.servers[i:i + 2])
         if self.doc_ops is not None:
             # define which doc operation will be performed during rebalancing;
             # only one type of op can be passed
             if ("update" in self.doc_ops):
                 # half of the data will be updated in each iteration
                 self._load_all_buckets(self.master,
                                        self.gen_update,
                                        "update",
                                        0,
                                        batch_size=batch_size)
             elif ("create" in self.doc_ops):
                  # half of the initial data will be added in each iteration
                 gen_create = BlobGenerator(
                     'mike',
                     'mike-',
                     self.value_size,
                     start=self.num_items * (1 + i) / 2.0,
                     end=self.num_items * (1 + i / 2.0))
                 self._load_all_buckets(self.master,
                                        gen_create,
                                        "create",
                                        0,
                                        timeout_secs=60,
                                        batch_size=batch_size)
             elif ("delete" in self.doc_ops):
                  # 1/(num_servers - 1) of the initial data will be removed in each iteration;
                  # at the end the bucket should be empty (or hold just a couple of items)
                 gen_delete = BlobGenerator(
                     'mike',
                     'mike-',
                     self.value_size,
                     start=int(self.num_items *
                               (1 - i / (self.num_servers - 1.0))) + 1,
                     end=int(self.num_items * (1 - (i - 1) /
                                               (self.num_servers - 1.0))))
                 self._load_all_buckets(self.master,
                                        gen_delete,
                                        "delete",
                                        0,
                                        timeout_secs=60,
                                        batch_size=batch_size)
         rebalance.result()
         self.verify_cluster_stats(self.servers[:i])
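The gen_delete ranges above walk through the keyspace in equal fractions; a small worked example with hypothetical values (num_items = 1000, num_servers = 5), mirroring the float arithmetic in the code:

    # Hypothetical walk-through of the delete ranges computed above.
    num_items, num_servers = 1000, 5
    for i in reversed(range(1, num_servers, 2)):    # i = 3, then 1
        start = int(num_items * (1 - i / (num_servers - 1.0))) + 1
        end = int(num_items * (1 - (i - 1) / (num_servers - 1.0)))
        print("i=%d: gen_delete covers [%d, %d)" % (i, start, end))
    # i=3: gen_delete covers [251, 500)
    # i=1: gen_delete covers [751, 1000)
    # each pass removes roughly num_items / (num_servers - 1) keys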
Code example #14
    def CreateUpdateDeleteExpireBeforeBackup(self):
        """Backup up the buckets after operations: update, delete, expire.

        We load a number of items first and then load some extra items. We do update, delete, expire operation
        on those extra items. After these mutations, we backup all the items and restore them for verification """

        gen_load = BlobGenerator('mysql', 'mysql-', self.value_size, end=self.num_items)
        gen_extra = BlobGenerator('couchdb', 'couchdb-', self.value_size, end=self.num_mutate_items)
        self._load_all_buckets(self.master, gen_load, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
        extra_items_deleted_flag = 0

        if(self.doc_ops is not None):
            self._load_all_buckets(self.master, gen_extra, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
            if("update" in self.doc_ops):
                self._load_all_buckets(self.master, gen_extra, "update", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
            if("delete" in self.doc_ops):
                self._load_all_buckets(self.master, gen_extra, "delete", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
                extra_items_deleted_flag = 1
            if("expire" in self.doc_ops):
                if extra_items_deleted_flag == 1:
                    self._load_all_buckets(self.master, gen_extra, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
                self._load_all_buckets(self.master, gen_extra, "update", self.expire_time, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
        time.sleep(30)

        self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, self.command_options)

        kvs_before = {}
        for bucket in self.buckets:
            kvs_before[bucket.name] = bucket.kvs[1]
        self._all_buckets_delete(self.master)
        gc.collect()

        if self.default_bucket:
            self.cluster.create_default_bucket(self.master, self.bucket_size, self.num_replicas)
            self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="", num_replicas=self.num_replicas, bucket_size=self.bucket_size))
        self._create_sasl_buckets(self.master, self.sasl_buckets)
        self._create_standard_buckets(self.master, self.standard_buckets)

        for bucket in self.buckets:
            bucket.kvs[1] = kvs_before[bucket.name]
        del kvs_before
        gc.collect()
        bucket_names = [bucket.name for bucket in self.buckets]
        self.shell.restore_backupFile(self.couchbase_login_info, self.backup_location, bucket_names)
        time.sleep(self.expire_time)  # sleep so the expired items can time out

        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
        self.verify_results(self.master)
        self._verify_stats_all_buckets(self.servers[:self.num_servers])
Code example #15
    def CreateUpdateDeleteBeforeBackup(self):
        """Back up the buckets after doing docs operations: create, update, delete, recreate.

        We load 2 kinds of items into the cluster with different key value prefix. Then we do
        mutations on part of the items according to clients' input param. After backup, we
        delete the existing buckets then recreate them and restore all the buckets. We verify
        the results by comparison between the items in KVStore and restored buckets items."""

        gen_load_mysql = BlobGenerator('mysql', 'mysql-', self.value_size, end=(self.num_items / 2 - 1))
        gen_load_couchdb = BlobGenerator('couchdb', 'couchdb-', self.value_size, start=self.num_items / 2, end=self.num_items)
        gen_update = BlobGenerator('mysql', 'mysql-', self.value_size, end=(self.num_items / 2 - 1))
        gen_delete = BlobGenerator('couchdb', 'couchdb-', self.value_size, start=self.num_items / 2, end=self.num_items)
        gen_create = BlobGenerator('mysql', 'mysql-', self.value_size, start=self.num_items / 2 + 1, end=self.num_items * 3 / 2)
        self._load_all_buckets(self.master, gen_load_mysql, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
        self._load_all_buckets(self.master, gen_load_couchdb, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)

        if(self.doc_ops is not None):
            if("update" in self.doc_ops):
                self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
            if("create" in self.doc_ops):
                self._load_all_buckets(self.master, gen_create, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
            if("delete" in self.doc_ops):
                self._load_all_buckets(self.master, gen_delete, "delete", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])

        self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, self.command_options)

        kvs_before = {}
        for bucket in self.buckets:
            kvs_before[bucket.name] = bucket.kvs[1]
        bucket_names = [bucket.name for bucket in self.buckets]
        self._all_buckets_delete(self.master)
        gc.collect()

        if self.default_bucket:
            self.cluster.create_default_bucket(self.master, self.bucket_size, self.num_replicas)
            self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="", num_replicas=self.num_replicas, bucket_size=self.bucket_size))
        self._create_sasl_buckets(self.master, self.sasl_buckets)
        self._create_standard_buckets(self.master, self.standard_buckets)

        for bucket in self.buckets:
            bucket.kvs[1] = kvs_before[bucket.name]
        del kvs_before
        gc.collect()
        self.shell.restore_backupFile(self.couchbase_login_info, self.backup_location, bucket_names)

        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
        self.verify_results(self.master)
        self._verify_stats_all_buckets(self.servers[:self.num_servers])
Code example #16
File: observetest.py  Project: saigon/testrunner
 def _load_doc_data_all_buckets(self,
                                op_type='create',
                                start=0,
                                end=0,
                                expiry=0):
     loaded = False
     count = 0
     gen_load = BlobGenerator('observe',
                              'observe',
                              1024,
                              start=start,
                              end=end)
     while not loaded and count < 60:
         try:
             self._load_all_buckets(self.servers[0], gen_load, op_type,
                                    expiry)
             loaded = True
         except MemcachedError as error:
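             # status 134 is assumed to be memcached's temporary-failure code, so back off and retry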
             if error.status == 134:
                 loaded = False
                 self.log.error(
                     "Memcached error 134, waiting 5 seconds before retrying"
                 )
                 count += 1
                 time.sleep(5)
Code example #17
    def incremental_rebalance_in_out_with_mutation_and_expiration(self):
        self.cluster.rebalance(self.servers[:self.num_servers],
                               self.servers[1:self.num_servers], [])
        gen_expire = BlobGenerator('mike',
                                   'mike-',
                                   self.value_size,
                                   start=self.num_items / 2,
                                   end=self.num_items)

        for i in reversed(range(self.num_servers)[self.num_servers / 2:]):
            tasks = self._async_load_all_buckets(self.master, self.gen_update,
                                                 "update", 0)
            tasks.extend(
                self._async_load_all_buckets(self.master, gen_expire, "update",
                                             5))

            self.cluster.rebalance(self.servers[:i], [],
                                   self.servers[i:self.num_servers])
            self.sleep(5)
            self.cluster.rebalance(self.servers[:self.num_servers],
                                   self.servers[i:self.num_servers], [])
            for task in tasks:
                task.result()
            self._load_all_buckets(self.master, gen_expire, "create", 0)
            self.verify_cluster_stats(self.servers[:self.num_servers])
Code example #18
    def incremental_rebalance_in_out_with_max_buckets_number(self):
        self.bucket_size = self.input.param("bucket_size", 100)
        bucket_num = max(10, self.quota / self.bucket_size)
        self.log.info('total %s buckets will be created with size %s MB' %
                      (bucket_num, self.bucket_size))
        self.cluster.create_default_bucket(self.master, self.bucket_size,
                                           self.num_replicas)
        self.buckets.append(
            Bucket(name="default",
                   authType="sasl",
                   saslPassword="",
                   num_replicas=self.num_replicas,
                   bucket_size=self.bucket_size))
        self._create_sasl_buckets(self.master, (bucket_num - 1) / 2)

        self._create_standard_buckets(self.master,
                                      bucket_num - 1 - (bucket_num - 1) / 2)
        self.cluster.rebalance(self.servers[:self.num_servers],
                               self.servers[1:self.num_servers], [])
        gen = BlobGenerator('mike',
                            'mike-',
                            self.value_size,
                            end=self.num_items)
        self._load_all_buckets(self.master, gen, "create", 0)

        for i in reversed(range(self.num_servers)[self.num_servers / 2:]):
            self._async_load_all_buckets(self.master, gen, "update", 0)

            self.cluster.rebalance(self.servers[:i], [],
                                   self.servers[i:self.num_servers])
            self.sleep(5)
            self._async_load_all_buckets(self.master, gen, "update", 0)
            self.cluster.rebalance(self.servers[:self.num_servers],
                                   self.servers[i:self.num_servers], [])
            self.verify_cluster_stats(self.servers[:self.num_servers])
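The bucket count above is split between one default bucket, SASL buckets, and standard buckets; a quick worked example with hypothetical numbers (quota = 2000 MB, bucket_size = 100 MB), using // so the integer division matches the original Python 2 code in either interpreter:

    # Hypothetical arithmetic for the bucket split above.
    quota, bucket_size = 2000, 100
    bucket_num = max(10, quota // bucket_size)          # 20 buckets in total
    sasl_buckets = (bucket_num - 1) // 2                # 9 SASL buckets
    standard_buckets = bucket_num - 1 - sasl_buckets    # 10 standard buckets
    assert 1 + sasl_buckets + standard_buckets == bucket_num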
Code example #19
File: rebalanceout.py  Project: Boggypop/testrunner
 def incremental_rebalance_out_with_mutation_and_expiration(self):
     gen_2 = BlobGenerator('mike',
                           'mike-',
                           self.value_size,
                           start=self.num_items / 2,
                           end=self.num_items)
     batch_size = 50
     for i in reversed(range(self.num_servers)[2:]):
         # don't use batching when rebalancing out from 2 nodes down to 1
         if i == 1:
             batch_size = 1
         rebalance = self.cluster.async_rebalance(self.servers[:i], [],
                                                  [self.servers[i]])
         self._load_all_buckets(self.master,
                                self.gen_update,
                                "update",
                                0,
                                batch_size=batch_size,
                                timeout_secs=60)
         self._load_all_buckets(self.master,
                                gen_2,
                                "update",
                                5,
                                batch_size=batch_size,
                                timeout_secs=60)
         rebalance.result()
         self.sleep(5)
         self._load_all_buckets(self.master, gen_2, "create", 0)
         self.verify_cluster_stats(self.servers[:i])
Code example #20
    def incremental_rebalance_out_in_with_mutation(self):
        init_num_nodes = self.input.param("init_num_nodes", 1)

        self.cluster.rebalance(self.servers[:self.num_servers],
                               self.servers[1:init_num_nodes], [])
        gen = BlobGenerator('mike',
                            'mike-',
                            self.value_size,
                            end=self.num_items)
        self._load_all_buckets(self.master, gen, "create", 0)

        for i in range(self.num_servers):
            tasks = self._async_load_all_buckets(self.master, gen, "update", 0)

            self.cluster.rebalance(
                self.servers[:self.num_servers],
                self.servers[init_num_nodes:init_num_nodes + i + 1], [])
            time.sleep(10)
            self.cluster.rebalance(
                self.servers[:self.num_servers], [],
                self.servers[init_num_nodes:init_num_nodes + i + 1])
            for task in tasks:
                task.result()
            self._wait_for_stats_all_buckets(self.servers[:init_num_nodes])
            self._verify_all_buckets(self.master, max_verify=self.max_verify)
            self._verify_stats_all_buckets(self.servers[:init_num_nodes])
Code example #21
File: connectionstests.py  Project: saigon/testrunner
 def checks_tap_connections_tests(self):
     servs_init = self.servers[:self.nodes_init]
     servs_in = [self.servers[i + self.nodes_init] for i in range(self.nodes_in)]
     servs_out = [self.servers[self.nodes_init - i - 1] for i in range(self.nodes_out)]
     rest = RestConnection(self.master)
     buckets_stats_before = {}
     for bucket in self.buckets:
         _, result = rest.get_bucket_stats_json(bucket)
         buckets_stats_before[bucket.name] = result["op"]["samples"]["ep_tap_user_count"]
     self.log.info("current nodes : {0}".format([node.id for node in rest.node_statuses()]))
     self.log.info("adding nodes {0} to cluster".format(servs_in))
     self.log.info("removing nodes {0} from cluster".format(servs_out))
     result_nodes = set(servs_init + servs_in) - set(servs_out)
     self.cluster.rebalance(servs_init[:self.nodes_init], servs_in, servs_out)
     gen = BlobGenerator('mike2', 'mike2-', self.value_size, end=self.num_items)
     self._load_all_buckets(self.master, gen, "create", 0)
     self.verify_cluster_stats(result_nodes)
     buckets_stats_after = {}
     for bucket in self.buckets:
         _, result = rest.get_bucket_stats_json(bucket)
         buckets_stats_after[bucket.name] = result["op"]["samples"]["ep_tap_user_count"]
         for stat in buckets_stats_after[bucket.name][len(buckets_stats_before[bucket.name]) - 1:]:
             if stat != 0:
                 self.log.error("'ep_tap_user_count' for bucket '{0}' before test: {1}".format(bucket.name, buckets_stats_before[bucket.name]))
                 self.log.error("'ep_tap_user_count' for bucket '{0}' after test: {1}".format(bucket.name, buckets_stats_after[bucket.name]))
                 self.log.error("'ep_tap_user_count' was expected to be 0 but is not")
         self.log.info("'ep_tap_user_count' for bucket '{0}' stayed 0 for the entire test".format(bucket.name))
Code example #22
    def checkpoint_create_time(self):
        """Load data, but let the timeout create a new checkpoint on all replicas"""

        param = 'checkpoint'
        stat_key = 'vb_0:open_checkpoint_id'

        self._set_checkpoint_timeout(self.servers[:self.num_servers],
                                     self.bucket, str(self.timeout))

        generate_load = BlobGenerator('nosql',
                                      'nosql-',
                                      self.value_size,
                                      end=self.num_items)
        self._load_all_buckets(self.master,
                               generate_load,
                               "create",
                               0,
                               1,
                               0,
                               True,
                               batch_size=self.checkpoint_size,
                               pause_secs=5,
                               timeout_secs=180)
        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])

        chk_stats = StatsCommon.get_stats([self.master], self.bucket, param,
                                          stat_key)
        self.log.info("Sleeping for {0} seconds)".format(self.timeout + 5))
        time.sleep(self.timeout + 5)
        self._verify_checkpoint_id(param, stat_key, chk_stats)
        self._verify_stats_all_buckets(self.servers[:self.num_servers])
Code example #23
 def _load_data_all_buckets(self, op_type='create', start=0):
     loaded = False
     count = 0
     gen_load = BlobGenerator('upgrade-',
                              'upgrade-',
                              self.data_size,
                              start=start,
                              end=self.num_items)
     while not loaded and count < 60:
         try:
             self._load_all_buckets(self.master,
                                    gen_load,
                                    op_type,
                                    self.expire_time,
                                    1,
                                    self.item_flag,
                                    True,
                                    batch_size=20000,
                                    pause_secs=5,
                                    timeout_secs=180)
             loaded = True
         except MemcachedError as error:
             if error.status == 134:
                 loaded = False
                 self.log.error(
                     "Memcached error 134, waiting before retrying"
                 )
                 count += 1
                 time.sleep(self.sleep_time)
Code example #24
    def checkpoint_collapse(self):
        """With 3 replicas, stop replication on R2, let Master and R1 close checkpoint.
        Run load until a new checkpoint is created on Master and R1.
        Wait till checkpoints merge on R1. Restart replication of R2.
        Checkpoint should advance to the latest on R2."""

        param = 'checkpoint'
        stat_key = 'vb_0:last_closed_checkpoint_id'
        stat_chk_itms = 'vb_0:num_checkpoint_items'

        self._set_checkpoint_size(self.servers[:self.num_servers], self.bucket,
                                  str(self.checkpoint_size))
        self._stop_replication(self.replica2, self.bucket)

        generate_load = BlobGenerator('nosql',
                                      'nosql-',
                                      self.value_size,
                                      end=self.num_items)
        data_load_thread = Thread(target=self._load_all_buckets,
                                  name="load_data",
                                  args=(self.master, generate_load, "create",
                                        0, 1, 0, True, self.checkpoint_size, 5,
                                        180))
        data_load_thread.start()
        m_stats = StatsCommon.get_stats([self.master], self.bucket, param,
                                        stat_key)

        tasks = []
        chk_pnt = int(m_stats[m_stats.keys()[0]]) + 2
        tasks.append(
            self.cluster.async_wait_for_stats([self.master], self.bucket,
                                              param, stat_key, '>=', chk_pnt))
        tasks.append(
            self.cluster.async_wait_for_stats([self.replica1], self.bucket,
                                              param, stat_key, '>=', chk_pnt))
        tasks.append(
            self.cluster.async_wait_for_stats([self.replica1], self.bucket,
                                              param, stat_chk_itms, '>=',
                                              self.num_items))
        data_load_thread.join()
        for task in tasks:
            try:
                task.result(60)
            except TimeoutError:
                self.fail("Checkpoint not collapsed")

        tasks = []
        self._start_replication(self.replica2, self.bucket)
        tasks.append(
            self.cluster.async_wait_for_stats([self.replica1], self.bucket,
                                              param, stat_chk_itms, '<',
                                              self.num_items))
        for task in tasks:
            try:
                task.result(60)
            except TimeoutError:
                self.fail("Checkpoints not replicated to replica2")

        self._verify_checkpoint_id(param, stat_key, m_stats)
        self._verify_stats_all_buckets(self.servers[:self.num_servers])
Code example #25
    def checkpoint_create_items(self):
        """Load data until a new checkpoint is created on all replicas"""

        param = 'checkpoint'
        stat_key = 'vb_0:open_checkpoint_id'

        self._set_checkpoint_size(self.servers[:self.num_servers], self.bucket,
                                  str(self.checkpoint_size))
        chk_stats = StatsCommon.get_stats([self.master], self.bucket, param,
                                          stat_key)

        generate_load = BlobGenerator('nosql',
                                      'nosql-',
                                      self.value_size,
                                      end=self.num_items)
        self._load_all_buckets(self.master,
                               generate_load,
                               "create",
                               0,
                               1,
                               0,
                               True,
                               batch_size=self.checkpoint_size,
                               pause_secs=5,
                               timeout_secs=180)
        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])

        self._verify_checkpoint_id(param, stat_key, chk_stats)
        self._verify_stats_all_buckets(self.servers[:self.num_servers])
Code example #26
    def setup_doc_gens(self):
        # create json doc generators
        ordering = range(self._num_items / 4)
        sites1 = ['google', 'bing', 'yahoo', 'wiki']
        sites2 = ['mashable', 'techcrunch', 'hackernews', 'slashdot']
        template = '{{ "ordering": {0}, "site_name": "{1}" }}'

        delete_start = int(
            (self._num_items) * (float)(100 - self._percent_delete) / 100)
        update_end = int(
            (self._num_items) * (float)(self._percent_update) / 100)

        self.gen_create =\
            DocumentGenerator('es_xdcr_docs', template, ordering,
                               sites1, start=0, end=self._num_items)

        self.gen_recreate =\
            DocumentGenerator('es_xdcr_docs', template, ordering,
                               sites2, start=0, end=self._num_items)

        self.gen_update =\
            DocumentGenerator('es_xdcr_docs', template, ordering,
                               sites1, start=0, end=update_end)
        self.gen_delete =\
            DocumentGenerator('es_xdcr_docs', template, ordering,
                               sites1, start=delete_start, end=self._num_items)

        self.gen_blob = BlobGenerator('loadOne',
                                      'loadOne',
                                      self._value_size,
                                      end=self._num_items)
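A quick illustration of the JSON template used above: the doubled braces escape to literal braces under str.format, so each generated doc pairs one ordering value with one site name (the exact DocumentGenerator behavior is assumed, not verified against the library):

    # Hypothetical rendering of a single generated document.
    template = '{{ "ordering": {0}, "site_name": "{1}" }}'
    print(template.format(0, 'google'))
    # -> { "ordering": 0, "site_name": "google" }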
Code example #27
    def incremental_rebalance_in_out_with_mutation(self):
        self.cluster.rebalance(self.servers[:self.num_servers],
                               self.servers[1:self.num_servers], [])
        gen = BlobGenerator('mike',
                            'mike-',
                            self.value_size,
                            end=self.num_items)
        self._load_all_buckets(self.master, gen, "create", 0)

        for i in reversed(range(self.num_servers)[self.num_servers / 2:]):
            tasks = self._async_load_all_buckets(self.master, gen, "update", 0)

            self.cluster.rebalance(self.servers[:i], [],
                                   self.servers[i:self.num_servers])
            time.sleep(5)
            for task in tasks:
                task.result()
            tasks = self._async_load_all_buckets(self.master, gen, "update", 0)
            self.cluster.rebalance(self.servers[:self.num_servers],
                                   self.servers[i:self.num_servers], [])
            for task in tasks:
                task.result()
            self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
            self._verify_all_buckets(self.master, max_verify=self.max_verify)
            self._verify_stats_all_buckets(self.servers[:self.num_servers])
Code example #28
    def load_data(self):
        gen_load = BlobGenerator('nosql', 'nosql-', self.value_size, end=self.num_items)
        gen_update = BlobGenerator('nosql', 'nosql-', self.value_size, end=(self.num_items / 2 - 1))
        gen_expire = BlobGenerator('nosql', 'nosql-', self.value_size, start=self.num_items / 2, end=(self.num_items * 3 / 4 - 1))
        gen_delete = BlobGenerator('nosql', 'nosql-', self.value_size, start=self.num_items * 3 / 4, end=self.num_items)
        self._load_all_buckets(self.server_origin, gen_load, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)

        if(self.doc_ops is not None):
            if("update" in self.doc_ops):
                self._load_all_buckets(self.server_origin, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
            if("delete" in self.doc_ops):
                self._load_all_buckets(self.server_origin, gen_delete, "delete", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
            if("expire" in self.doc_ops):
                self._load_all_buckets(self.server_origin, gen_expire, "update", self.expire_time, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
        self._wait_for_stats_all_buckets([self.server_origin])
        time.sleep(30)
Code example #29
    def collectinfo_test(self):
        """We use cbcollect_info to automatically collect the logs for server node

        First we load some items to the node. Optionally you can do some mutation
        against these items. Then we use cbcollect_info the automatically generate
        the zip file containing all the logs about the node. We want to verify we have
        all the log files according to the LOG_FILE_NAME_LIST and in stats.log, we have
        stats for all the buckets we have created"""

        gen_load = BlobGenerator('nosql',
                                 'nosql-',
                                 self.value_size,
                                 end=self.num_items)
        gen_update = BlobGenerator('nosql',
                                   'nosql-',
                                   self.value_size,
                                   end=(self.num_items / 2 - 1))
        gen_expire = BlobGenerator('nosql',
                                   'nosql-',
                                   self.value_size,
                                   start=self.num_items / 2,
                                   end=(self.num_items * 3 / 4 - 1))
        gen_delete = BlobGenerator('nosql',
                                   'nosql-',
                                   self.value_size,
                                   start=self.num_items * 3 / 4,
                                   end=self.num_items)
        self._load_all_buckets(self.master, gen_load, "create", 0)

        if (self.doc_ops is not None):
            if ("update" in self.doc_ops):
                self._load_all_buckets(self.master, gen_update, "update", 0)
            if ("delete" in self.doc_ops):
                self._load_all_buckets(self.master, gen_delete, "delete", 0)
            if ("expire" in self.doc_ops):
                self._load_all_buckets(self.master, gen_expire, "update",
                                       self.expire_time)
                time.sleep(self.expire_time + 1)
        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])

        self.shell.delete_files("%s.zip" % (self.log_filename))
        self.shell.delete_files(
            "cbcollect_info*"
        )  # This is the folder generated after unzipping the log package
        self.shell.execute_cbcollect_info("%s.zip" % (self.log_filename))
        self.verify_results(self.log_filename)
Code example #30
File: bucketflush.py  Project: ronniedada/testrunner
 def setUp(self):
     super(BucketFlushTests, self).setUp()
     self.nodes_in = self.input.param("nodes_in", 0)
     self.value_size = self.input.param("value_size", 256)
     self.gen_create = BlobGenerator('bucketflush',
                                     'bucketflush-',
                                     self.value_size,
                                     end=self.num_items)
Code example #31
File: CCCP.py  Project: abhinavdangeti/testrunner
 def test_not_my_vbucket_config(self):
     self.gen_load = BlobGenerator('cccp', 'cccp-', self.value_size, end=self.num_items)
     self._load_all_buckets(self.master, self.gen_load, "create", 0)
     self.cluster.rebalance(self.servers[:self.nodes_init],
                            self.servers[self.nodes_init:self.nodes_init + 1], [])
     self.nodes_init = self.nodes_init + 1
     for bucket in self.buckets:
         while self.gen_load.has_next():
             key, _ = self.gen_load.next()
             try:
                 self.clients[bucket.name].get(key)
             except Exception as ex:
                 self.log.info("Config in exception is correct. Bucket %s, key %s" % (bucket.name, key))
                 config = str(ex)[str(ex).find("Not my vbucket':") + 16 : str(ex).find("for vbucket")]
                 config = json.loads(config)
                 self.verify_config(config, bucket)
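The slicing above digs the JSON config out of the exception text; a toy illustration with a made-up error string (the real payload format is assumed from the slicing logic, and 16 is the length of "Not my vbucket':"):

    import json

    # Hypothetical error text shaped like the one the slicing expects.
    ex_text = "Memcached error #7 'Not my vbucket':{\"rev\": 1} for vbucket 12"
    start = ex_text.find("Not my vbucket':") + 16   # skip past the marker itself
    end = ex_text.find("for vbucket")
    config = json.loads(ex_text[start:end])
    print(config["rev"])   # -> 1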
Code example #32
File: CCCP.py  Project: abhinavdangeti/testrunner
class CCCP(BaseTestCase):

    def setUp(self):
        super(CCCP, self).setUp()
        self.map_fn = 'function (doc){emit([doc.join_yr, doc.join_mo],doc.name);}'
        self.ddoc_name = "cccp_ddoc"
        self.view_name = "cccp_view"
        self.default_view = View(self.view_name, self.map_fn, None, False)
        self.ops = self.input.param("ops", None)
        self.clients = {}
        try:
            for bucket in self.buckets:
                self.clients[bucket.name] =\
                  MemcachedClientHelper.direct_client(self.master, bucket.name)
        except:
            self.tearDown()

    def tearDown(self):
        super(CCCP, self).tearDown()

    def test_get_config_client(self):
        tasks = self.run_ops()
        for bucket in self.buckets:
            _, _, config = self.clients[bucket.name].get_config()
            self.verify_config(json.loads(config), bucket)
        for task in tasks:
            task.result()

    def test_get_config_rest(self):
        tasks = self.run_ops()
        for bucket in self.buckets:
            config = RestConnection(self.master).get_bucket_CCCP(bucket)
            self.verify_config(config, bucket)
        for task in tasks:
            task.result()

    def test_set_config(self):
        tasks = self.run_ops()
        config_expected = 'abcabc'
        for bucket in self.buckets:
            self.clients[bucket.name].set_config(config_expected)
            _, _, config = self.clients[bucket.name].get_config()
            self.assertEquals(config_expected, config, "Expected config: %s, actual %s" %(
                                                      config_expected, config))
            self.log.info("Config was set correctly. Bucket %s" % bucket.name)
        for task in tasks:
            task.result()

    def test_not_my_vbucket_config(self):
        self.gen_load = BlobGenerator('cccp', 'cccp-', self.value_size, end=self.num_items)
        self._load_all_buckets(self.master, self.gen_load, "create", 0)
        self.cluster.rebalance(self.servers[:self.nodes_init],
                               self.servers[self.nodes_init:self.nodes_init + 1], [])
        self.nodes_init = self.nodes_init + 1
        for bucket in self.buckets:
            while self.gen_load.has_next():
                key, _ = self.gen_load.next()
                try:
                    self.clients[bucket.name].get(key)
                except Exception as ex:
                    self.log.info("Config in exception is correct. Bucket %s, key %s" % (bucket.name, key))
                    config = str(ex)[str(ex).find("Not my vbucket':") + 16 : str(ex).find("for vbucket")]
                    config = json.loads(config)
                    self.verify_config(config, bucket)