Example #1
def initialize_bucket(name, port=None, saslPassword=None):
    if saslPassword:
        return Bucket(name=name, authType="sasl", saslPassword=saslPassword)
    elif port:
        return Bucket(name=name, authType=None, saslPassword=None, port=port)
    else:
        return Bucket(name=name, authType="sasl", saslPassword=None)
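The helper above condenses the three construction patterns that recur in the examples below: a sasl bucket, a standard bucket with an explicit port, and the default bucket with an empty sasl password. A minimal sketch of those patterns, using a hypothetical stand-in for the testrunner `Bucket` class so the snippet runs on its own (the real class accepts further keyword arguments such as num_replicas, bucket_size, master_id, and type):

# Hypothetical stand-in for the testrunner Bucket class; only the keyword
# arguments exercised in these examples are modeled here.
class Bucket(object):
    def __init__(self, name, authType=None, saslPassword=None, port=None, **kwargs):
        self.name = name
        self.authType = authType
        self.saslPassword = saslPassword
        self.port = port
        self.extra = kwargs  # num_replicas, bucket_size, master_id, type, ...

# The three patterns seen throughout the examples below:
sasl_bucket = Bucket(name="bucket0", authType="sasl", saslPassword="password")
standard_bucket = Bucket(name="standard_bucket0", authType=None, saslPassword=None, port=11214)
default_bucket = Bucket(name="default", authType="sasl", saslPassword="")

for b in (sasl_bucket, standard_bucket, default_bucket):
    print(b.name, b.authType, b.port)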
Example #2
    def test_valid_bucket_name(self, password='******'):
            tasks = []
            shared_params = self._create_bucket_params(server=self.server, size=self.bucket_size,
                                                              replicas=self.num_replicas)
            if self.bucket_type == 'sasl':
                self.cluster.create_sasl_bucket(name=self.bucket_name, password=password, bucket_params=shared_params)
                self.buckets.append(Bucket(name=self.bucket_name, authType="sasl", saslPassword=password, num_replicas=self.num_replicas,
                                           bucket_size=self.bucket_size, master_id=self.server))
            elif self.bucket_type == 'standard':
                self.cluster.create_standard_bucket(name=self.bucket_name, port=STANDARD_BUCKET_PORT+1,
                                                    bucket_params=shared_params)
                self.buckets.append(Bucket(name=self.bucket_name, authType=None, saslPassword=None, num_replicas=self.num_replicas,
                                           bucket_size=self.bucket_size, port=STANDARD_BUCKET_PORT + 1, master_id=self.server))
            elif self.bucket_type == "memcached":
                tasks.append(self.cluster.async_create_memcached_bucket(name=self.bucket_name,
                                                                        port=STANDARD_BUCKET_PORT+1,
                                                                        bucket_params=shared_params))

                self.buckets.append(Bucket(name=self.bucket_name, authType=None, saslPassword=None,
                                           num_replicas=self.num_replicas, bucket_size=self.bucket_size,
                                           port=STANDARD_BUCKET_PORT + 1, master_id=self.server, type='memcached'))
                for task in tasks:
                    task.result()
            else:
                self.log.error('Bucket type not specified')
                return
            self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(self.bucket_name, self.rest),
                            msg='failed to start up bucket with name "{0}"'.format(self.bucket_name))
            gen_load = BlobGenerator('buckettest', 'buckettest-', self.value_size, start=0, end=self.num_items)
            self._load_all_buckets(self.server, gen_load, "create", 0)
            self.cluster.bucket_delete(self.server, self.bucket_name)
            self.assertTrue(BucketOperationHelper.wait_for_bucket_deletion(self.bucket_name, self.rest, timeout_in_seconds=60),
                            msg='bucket "{0}" was not deleted even after waiting for 60 seconds'.format(self.bucket_name))
Example #3
    def test_ephemeral_buckets(self):
        shared_params = self._create_bucket_params(server=self.server,
                                                   size=100,
                                                   replicas=self.num_replicas,
                                                   bucket_type='ephemeral')
        # just do sasl for now, pending decision on support of non-sasl buckets in 5.0
        self.cluster.create_sasl_bucket(name=self.bucket_name,
                                        password=self.sasl_password,
                                        bucket_params=shared_params)
        self.buckets.append(
            Bucket(name=self.bucket_name,
                   authType="sasl",
                   saslPassword=self.sasl_password,
                   num_replicas=self.num_replicas,
                   bucket_size=self.bucket_size,
                   master_id=self.server))

        self.assertTrue(BucketOperationHelper.wait_for_bucket_creation(
            self.bucket_name, self.rest),
                        msg='failed to start up bucket with name "{0}"'.format(
                            self.bucket_name))
        gen_load = BlobGenerator('buckettest',
                                 'buckettest-',
                                 self.value_size,
                                 start=0,
                                 end=self.num_items)
        self._load_all_buckets(self.server, gen_load, "create", 0)
        self.cluster.bucket_delete(self.server, self.bucket_name)
        self.assertTrue(
            BucketOperationHelper.wait_for_bucket_deletion(
                self.bucket_name, self.rest, timeout_in_seconds=60),
            msg='bucket "{0}" was not deleted even after waiting for 60 seconds'
            .format(self.bucket_name))
Example #4
    def _create_buckets(self, nodes):
        if self._dgm_run_bool:
            self._mem_quota_int = 256
        master_node = nodes[0]
        bucket_size = self._get_bucket_size(master_node, nodes,
                                            self._mem_quota_int,
                                            self._default_bucket)
        rest = RestConnection(master_node)
        master_id = rest.get_nodes_self().id

        if self._sasl_buckets > 0:
            self._create_sasl_buckets(master_node, master_id, bucket_size)
        if self._standard_buckets > 0:
            self._create_standard_buckets(master_node, master_id, bucket_size)
        if self._default_bucket:
            self._cluster_helper.create_default_bucket(master_node,
                                                       bucket_size,
                                                       self._num_replicas)
            self._buckets.append(
                Bucket(name="default",
                       authType="sasl",
                       saslPassword="",
                       num_replicas=self._num_replicas,
                       bucket_size=bucket_size,
                       master_id=master_id))
Example #5
 def setUp(self):
     super(NodeServiceTests, self).setUp()
     self.helper = ServerHelper(self)
     num_buckets = self.input.param("num_buckets", 1)
     compression = self.input.param("sdk_compression", True)
     for i in range(num_buckets):
         RestConnection(self.servers[0]).create_bucket(
             bucket='bucket%s' % i,
             ramQuotaMB=100,
             proxyPort=STANDARD_BUCKET_PORT + i + 1)
         gen_load = BlobGenerator('ui', 'ui-', 256, start=0, end=10)
         cluster = Cluster()
         try:
             gen = copy.deepcopy(gen_load)
             task = cluster.async_load_gen_docs(self.servers[0],
                                                'bucket%s' % i,
                                                gen,
                                                Bucket().kvs[1],
                                                'create',
                                                0,
                                                0,
                                                True,
                                                1,
                                                1,
                                                30,
                                                compression=compression)
             task.result()
         finally:
             cluster.shutdown()
     BaseHelper(self).login()
Example #6
 def setUp(self):
     try:
         if 'first_case' not in TestInputSingleton.input.test_params:
             TestInputSingleton.input.test_params['default_bucket'] = False
             TestInputSingleton.input.test_params['skip_cleanup'] = True
         self.default_bucket_name = 'default'
         super(SpatialQueryErrorsTests, self).setUp()
         if 'first_case' in TestInputSingleton.input.test_params:
             self.cluster.rebalance(self.servers[:], self.servers[1:], [])
         # We use only one bucket in this test suite
         self.rest = RestConnection(self.master)
         self.bucket = self.rest.get_bucket(
             Bucket(name=self.default_bucket_name))
         # num_docs must be a multiple of the number of vbuckets
         self.num_docs = self.input.param("num_docs", 2000)
         # `testname` is used for the design document name as well as the
         # spatial function name
         self.testname = 'query-errors'
         self.helper = SpatialHelper(self, "default")
         if 'first_case' in TestInputSingleton.input.test_params:
             self.create_ddoc()
             self.helper.insert_docs(self.num_docs, self.testname)
     except Exception as ex:
         self.input.test_params["stop-on-failure"] = True
         self.log.error("SETUP WAS FAILED. ALL TESTS WILL BE SKIPPED")
         self.fail(ex)
Example #7
    def _create_buckets(self, nodes):
        master_node = nodes[0]
        if self.src_master.ip in [node.ip for node in nodes]:
            buckets = self.buckets_on_src
        elif self.dest_master.ip in [node.ip for node in nodes]:
            buckets = self.buckets_on_dest

        bucket_size = self._get_bucket_size(self._mem_quota_int, len(buckets))
        rest = RestConnection(master_node)
        master_id = rest.get_nodes_self().id

        sasl_buckets = len(
            [bucket for bucket in buckets if bucket.startswith("bucket")])
        self._create_sasl_buckets(master_node, sasl_buckets, master_id,
                                  bucket_size)
        standard_buckets = len([
            bucket for bucket in buckets
            if bucket.startswith("standard_bucket")
        ])
        self._create_standard_buckets(master_node, standard_buckets, master_id,
                                      bucket_size)
        if "default" in buckets:
            self.cluster.create_default_bucket(master_node, bucket_size,
                                               self._num_replicas)
            self.buckets.append(
                Bucket(name="default",
                       authType="sasl",
                       saslPassword="",
                       num_replicas=self._num_replicas,
                       bucket_size=bucket_size,
                       master_id=master_id))
        self.sleep(30)
Example #8
    def operations(self, multi_nodes=False):
        self.quota = self._initialize_nodes(self.cluster, self.servers,
                                            self.disabled_consistent_view)
        self.buckets = []
        gc.collect()
        if self.total_buckets > 0:
            self.bucket_size = self._get_bucket_size(self.quota,
                                                     self.total_buckets)

        if self.default_bucket:
            self.cluster.create_default_bucket(self.master, self.bucket_size,
                                               self.num_replicas)
            self.buckets.append(
                Bucket(name="default",
                       authType="sasl",
                       saslPassword="",
                       num_replicas=self.num_replicas,
                       bucket_size=self.bucket_size))

        self._create_sasl_buckets(self.master, self.sasl_buckets)
        self._create_standard_buckets(self.master, self.standard_buckets)
        if multi_nodes:
            servers_in = [
                self.servers[i + 1]
                for i in range(self.initial_num_servers - 1)
            ]
            self.cluster.rebalance(self.servers[:1], servers_in, [])
        if self.op_types == "data":
            self._load_data_all_buckets("create")
            if multi_nodes:
                self._wait_for_stats_all_buckets(
                    self.servers[:self.initial_num_servers])
            else:
                self._wait_for_stats_all_buckets([self.master])
Example #9
 def _create_memcached_buckets(self,
                               server,
                               num_buckets,
                               server_id=None,
                               bucket_size=None):
     if not num_buckets:
         return
     if server_id is None:
         server_id = RestConnection(server).get_nodes_self().id
     if bucket_size is None:
         bucket_size = self.bucket_size
     bucket_tasks = []
     for i in range(num_buckets):
         name = 'memcached_bucket' + str(i)
          bucket_tasks.append(self.cluster.async_create_memcached_bucket(
              server, name, 11214 + self.standard_buckets + i,
              bucket_size, self.num_replicas))
          self.buckets.append(Bucket(name=name, authType=None, saslPassword=None,
                                     num_replicas=self.num_replicas,
                                     bucket_size=bucket_size,
                                     port=11214 + self.standard_buckets + i,
                                     master_id=server_id, type='memcached'))
     for task in bucket_tasks:
         task.result()
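The port expression 11214 + self.standard_buckets + i places the memcached buckets on the ports immediately after the ones already taken by standard buckets (which start at 11214 in these examples). A small worked sketch of that arithmetic, with hypothetical counts:

# Worked example of the port layout assumed above (hypothetical counts).
STANDARD_BUCKET_BASE_PORT = 11214   # first standard-bucket port used in these examples
standard_buckets = 2                # standard buckets would occupy 11214 and 11215
num_memcached_buckets = 3

memcached_ports = [STANDARD_BUCKET_BASE_PORT + standard_buckets + i
                   for i in range(num_memcached_buckets)]
print(memcached_ports)              # [11216, 11217, 11218]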
Example #10
 def _create_sasl_buckets(self,
                          server,
                          num_buckets,
                          server_id=None,
                          bucket_size=None):
     if not num_buckets:
         return
     if server_id is None:
         server_id = RestConnection(server).get_nodes_self().id
     if bucket_size is None:
         bucket_size = self.bucket_size
     bucket_tasks = []
     for i in range(num_buckets):
         name = 'bucket' + str(i)
         bucket_tasks.append(
             self.cluster.async_create_sasl_bucket(server, name, 'password',
                                                   bucket_size,
                                                   self.num_replicas))
         self.buckets.append(
             Bucket(name=name,
                    authType="sasl",
                    saslPassword='******',
                    num_replicas=self.num_replicas,
                    bucket_size=bucket_size,
                    master_id=server_id))
     for task in bucket_tasks:
         task.result(self.wait_timeout * 10)
Example #11
    def rebalance_in_out_at_once_with_max_buckets_number(self):
        servs_init = self.servers[:self.nodes_init]
        servs_in = [self.servers[i + self.nodes_init] for i in range(self.nodes_in)]
        servs_out = [self.servers[self.nodes_init - i - 1] for i in range(self.nodes_out)]
        rest = RestConnection(self.master)
        self._wait_for_stats_all_buckets(servs_init)
        self.log.info("current nodes : {0}".format([node.id for node in rest.node_statuses()]))
        self.log.info("adding nodes {0} to cluster".format(servs_in))
        self.log.info("removing nodes {0} from cluster".format(servs_out))
        result_nodes = set(servs_init + servs_in) - set(servs_out)
        rest = RestConnection(self.master)
        bucket_num = rest.get_internalSettings("maxBucketCount")
        self.bucket_size = self.quota // bucket_num

        self.log.info('total %s buckets will be created with size %s MB' % (bucket_num, self.bucket_size))
        self.cluster.create_default_bucket(self.master, self.bucket_size, self.num_replicas)
        self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="",
                                       num_replicas=self.num_replicas, bucket_size=self.bucket_size))
        self._create_sasl_buckets(self.master, (bucket_num - 1) // 2)
        self._create_standard_buckets(self.master, bucket_num - 1 - (bucket_num - 1) // 2)

        gen = BlobGenerator('mike', 'mike-', self.value_size, end=self.num_items)
        self._load_all_buckets(self.master, gen, "create", 0)
        self._wait_for_stats_all_buckets(servs_init)

        rebalance = self.cluster.async_rebalance(servs_init, servs_in, servs_out)
        self._async_load_all_buckets(self.master, gen, "update", 0)
        rebalance.result()
        self.verify_cluster_stats(result_nodes)
        self.verify_unacked_bytes_all_buckets()
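The bucket split above reserves one slot for the default bucket, gives roughly half of the remaining slots to sasl buckets, and the rest to standard buckets. A worked sketch of that arithmetic with hypothetical numbers, using integer division as the original Python 2 code intended:

# Worked example of the bucket split used above (hypothetical quota and count).
quota = 1000                        # MB available for buckets
bucket_num = 10                     # e.g. the cluster's maxBucketCount setting
bucket_size = quota // bucket_num   # 100 MB per bucket

sasl_count = (bucket_num - 1) // 2                       # 4
standard_count = bucket_num - 1 - (bucket_num - 1) // 2  # 5
assert 1 + sasl_count + standard_count == bucket_num     # default + sasl + standard
print(bucket_size, sasl_count, standard_count)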
Example #12
 def setUp(self):
     try:
         if 'first_case' not in TestInputSingleton.input.test_params:
             TestInputSingleton.input.test_params['default_bucket'] = False
             TestInputSingleton.input.test_params['skip_cleanup'] = True
         self.default_bucket_name = 'default'
         super(ViewMergingTests, self).setUp()
         if 'first_case' in TestInputSingleton.input.test_params:
             self.cluster.rebalance(self.servers[:], self.servers[1:], [])
         # We use only one bucket in this test suite
         self.rest = RestConnection(self.master)
         self.bucket = self.rest.get_bucket(Bucket(name=self.default_bucket_name))
         # num_docs must be a multiple of the number of vbuckets
         self.num_docs = self.input.param("num_docs_per_vbucket", 1) * \
                         len(self.bucket.vbuckets)
         self.is_dev_view = self.input.param("is_dev_view", True)
         self.map_view_name = 'mapview1'
         self.red_view_name = 'redview1'
         self.red_view_stats_name = 'redview_stats'
         self.clients = self.init_clients()
         if 'first_case' in TestInputSingleton.input.test_params:
             self.create_ddocs()
     except Exception as ex:
         self.input.test_params["stop-on-failure"] = True
         self.log.error("SETUP WAS FAILED. ALL TESTS WILL BE SKIPPED")
         self.fail(ex)
Example #13
    def incremental_rebalance_in_out_with_max_buckets_number(self):
        self.bucket_size = self.input.param("bucket_size", 100)
        bucket_num = max(10, self.quota // self.bucket_size)
        self.log.info('total %s buckets will be created with size %s MB' %
                      (bucket_num, self.bucket_size))
        self.cluster.create_default_bucket(self.master, self.bucket_size,
                                           self.num_replicas)
        self.buckets.append(
            Bucket(name="default",
                   authType="sasl",
                   saslPassword="",
                   num_replicas=self.num_replicas,
                   bucket_size=self.bucket_size))
        self._create_sasl_buckets(self.master, (bucket_num - 1) // 2)

        self._create_standard_buckets(self.master,
                                      bucket_num - 1 - (bucket_num - 1) // 2)
        self.cluster.rebalance(self.servers[:self.num_servers],
                               self.servers[1:self.num_servers], [])
        gen = BlobGenerator('mike',
                            'mike-',
                            self.value_size,
                            end=self.num_items)
        self._load_all_buckets(self.master, gen, "create", 0)

        for i in reversed(range(self.num_servers)[self.num_servers // 2:]):
            self._async_load_all_buckets(self.master, gen, "update", 0)

            self.cluster.rebalance(self.servers[:i], [],
                                   self.servers[i:self.num_servers])
            self.sleep(5)
            self._async_load_all_buckets(self.master, gen, "update", 0)
            self.cluster.rebalance(self.servers[:self.num_servers],
                                   self.servers[i:self.num_servers], [])
            self.verify_cluster_stats(self.servers[:self.num_servers])
Example #14
    def test_verify_mb15892(self):
        """
        Test case for MB-15892

        Creating a replication should not allow setting up replication to a remote memcached bucket.
        Make sure to set default_bucket to False, as the test creates default buckets
        on the source and destination masters.
        """
        rest_conn_src = RestConnection(self.src_master)
        rest_conn_src.create_bucket(bucket='default', ramQuotaMB=256)
        master_id = rest_conn_src.get_nodes_self().id
        # if this is not a cluster_run, use IP addresses instead of localhost
        if len(set([server.ip for server in self._servers])) != 1:
            master_id = master_id.replace("127.0.0.1", self.src_master.ip).replace("localhost", self.src_master.ip)
        self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="",
                                       num_replicas=self._num_replicas, bucket_size=256, master_id=master_id,
                                       eviction_policy=self.eviction_policy))

        rest_conn_dest = RestConnection(self.dest_master)
        rest_conn_dest.create_bucket(bucket='default', ramQuotaMB=256, bucketType='memcached')
        master_id = rest_conn_dest.get_nodes_self().id
        # if this is not a cluster_run, use IP addresses instead of localhost
        if len(set([server.ip for server in self._servers])) != 1:
            master_id = master_id.replace("127.0.0.1", self.dest_master.ip).replace("localhost", self.dest_master.ip)
        self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="",
                                       num_replicas=self._num_replicas, bucket_size=256, master_id=master_id,
                                       eviction_policy=self.eviction_policy))

        remote_cluster_name = "C2"
        self._link_clusters(self.src_master, remote_cluster_name, self.dest_master)

        buckets = self._get_cluster_buckets(self.src_master)
        src_bucket = buckets[0]

        buckets = self._get_cluster_buckets(self.dest_master)
        dest_bucket = buckets[0]

        try:
            rest_conn_src.start_replication(XDCRConstants.REPLICATION_TYPE_CONTINUOUS,
                                        src_bucket.name, remote_cluster_name,
                                        self.rep_type, toBucket=dest_bucket.name)
        except Exception as e:
            expected_error = "Incompatible target bucket"
            self.assertTrue(expected_error in str(e), "Incompatible target bucket exception not raised as expected")
            self.log.info("Incompatible target bucket exception raised as expected")
Example #15
    def _bucket_creation(self):
        if self.default_bucket:
            self.cluster.create_default_bucket(self.master, self.bucket_size, self.num_replicas)
            self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="",
                                       num_replicas=self.num_replicas, bucket_size=self.bucket_size))

        self._create_sasl_buckets(self.master, self.sasl_buckets)
        self._create_standard_buckets(self.master, self.standard_buckets)
        self._create_memcached_buckets(self.master, self.memcached_buckets)
Example #16
    def recover_to_backupdir(self):
        """Recover data with 2.0 couchstore files to a 2.0 backup diretory

        We load a number of items to a node first and then do some mutataion on these items.
        Later we use cbtransfer to transfer the couchstore files we have on this node to
        a backup directory. We use cbrestore to restore these backup files to the same node
        for verification."""

        self.load_data()

        kvs_before = {}
        bucket_names = []

        self.shell.delete_files(self.backup_location)
        self.shell.create_directory(self.backup_location)

        for bucket in self.buckets:
            kvs_before[bucket.name] = bucket.kvs[1]
            bucket_names.append(bucket.name)
            transfer_source = "-v -v -v couchstore-files://%s" % (
                COUCHBASE_DATA_PATH)
            transfer_destination = self.backup_location
            self.shell.execute_cbtransfer(transfer_source,
                                          transfer_destination)

        self._all_buckets_delete(self.server_origin)
        if self.default_bucket:
            self.cluster.create_default_bucket(self.server_origin,
                                               self.bucket_size,
                                               self.num_replicas)
            self.buckets.append(
                Bucket(name="default",
                       authType="sasl",
                       saslPassword="",
                       num_replicas=self.num_replicas,
                       bucket_size=self.bucket_size))
        self._create_sasl_buckets(self.server_origin, self.sasl_buckets)
        self._create_standard_buckets(self.server_origin,
                                      self.standard_buckets)

        for bucket in self.buckets:
            bucket.kvs[1] = kvs_before[bucket.name]
        del kvs_before
        self.shell.restore_backupFile(self.couchbase_login_info,
                                      self.backup_location, bucket_names)
        time.sleep(self.expire_time + 1)
        for bucket in self.buckets:
            self.shell.execute_cbepctl(bucket, "", "set flush_param",
                                       "exp_pager_stime", 5)
        time.sleep(30)

        self._wait_for_stats_all_buckets([self.server_origin])
        self._verify_all_buckets(self.server_origin, 1, self.wait_timeout * 50,
                                 None, True)
        self._verify_stats_all_buckets([self.server_origin])
Example #17
    def recover_to_cbserver(self):
        """Recover data with 2.0 couchstore files to a 2.0 online server

        We load a number of items to one node first and then do some mutations on these items.
        Later we use cbtransfer to transfer the couchstore files we have on this
        node to a new node. We verify the data by comparing the items in the KVStore
        with the items on the new node."""

        self.load_data()

        kvs_before = {}
        bucket_names = []
        for bucket in self.buckets:
            kvs_before[bucket.name] = bucket.kvs[1]
            bucket_names.append(bucket.name)

        if self.default_bucket:
            self.cluster.create_default_bucket(self.server_recovery,
                                               self.bucket_size,
                                               self.num_replicas)
            self.buckets.append(
                Bucket(name="default",
                       authType="sasl",
                       saslPassword="",
                       num_replicas=self.num_replicas,
                       bucket_size=self.bucket_size))
        self._create_sasl_buckets(self.server_recovery, self.sasl_buckets)
        self._create_standard_buckets(self.server_recovery,
                                      self.standard_buckets)

        for bucket in self.buckets:
            bucket.kvs[1] = kvs_before[bucket.name]
            transfer_source = "couchstore-files://%s" % (COUCHBASE_DATA_PATH)
            transfer_destination = "http://%s@%s:%s -b %s -B %s -v -v -v" % (
                self.couchbase_login_info, self.server_recovery.ip,
                self.server_recovery.port, bucket.name, bucket.name)
            self.shell.execute_cbtransfer(transfer_source,
                                          transfer_destination)
        del kvs_before
        time.sleep(self.expire_time + 1)
        shell_server_recovery = RemoteMachineShellConnection(
            self.server_recovery)
        for bucket in self.buckets:
            shell_server_recovery.execute_cbepctl(bucket, "",
                                                  "set flush_param",
                                                  "exp_pager_stime", 5)
        shell_server_recovery.disconnect()
        time.sleep(30)

        self._wait_for_stats_all_buckets([self.server_recovery])
        self._verify_all_buckets(self.server_recovery, 1,
                                 self.wait_timeout * 50, self.max_verify, True,
                                 1)
        self._verify_stats_all_buckets([self.server_recovery])
Example #18
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.buckets = []
        self.master = self.servers[0]
        self.cluster = Cluster()
        self.wait_timeout = self.input.param("wait_timeout", 60)
        # number of the test case performed by testrunner (incremented each time)
        self.case_number = self.input.param("case_number", 0)
        self.default_bucket = self.input.param("default_bucket", True)
        if self.default_bucket:
            self.default_bucket_name = "default"
        self.standard_buckets = self.input.param("standard_buckets", 0)
        self.sasl_buckets = self.input.param("sasl_buckets", 0)
        self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
        self.num_servers = self.input.param("servers", len(self.servers))
        self.num_replicas = self.input.param("replicas", 1)
        self.num_items = self.input.param("items", 1000)
        self.dgm_run = self.input.param("dgm_run", False)
        # max number of items to verify in ValidateDataTask; None means verify all
        self.max_verify = self.input.param("max_verify", None)
        #we don't change consistent_view on server by default
        self.disabled_consistent_view = self.input.param(
            "disabled_consistent_view", None)
        self.log.info("==============  basetestcase setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
        # avoid cleanup if the previous test has already been torn down
        if not self.input.param("skip_cleanup", True) or self.case_number == 1:
            self.tearDown()
            self.cluster = Cluster()
        self.quota = self._initialize_nodes(self.cluster, self.servers,
                                            self.disabled_consistent_view)
        if self.dgm_run:
            self.quota = 256
        if self.total_buckets > 0:
            self.bucket_size = self._get_bucket_size(self.quota,
                                                     self.total_buckets)

        if self.default_bucket:
            self.cluster.create_default_bucket(self.master, self.bucket_size,
                                               self.num_replicas)
            self.buckets.append(
                Bucket(name="default",
                       authType="sasl",
                       saslPassword="",
                       num_replicas=self.num_replicas,
                       bucket_size=self.bucket_size))

        self._create_sasl_buckets(self.master, self.sasl_buckets)
        self._create_standard_buckets(self.master, self.standard_buckets)
        self.log.info("==============  basetestcase setup was finished for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
        self._log_start(self)
Example #19
    def CreateUpdateDeleteExpireDuringBackup(self):
        """Backup the items during mutation on existing items is running.

        We first load amount of items. After that, when we start backup, we begin do mutations on these existing items."""

        gen_load = BlobGenerator('mysql', 'mysql-', self.value_size, end=self.num_items)
        gen_update = BlobGenerator('mysql', 'mysql-', self.value_size, end=(self.num_items // 2 - 1))
        gen_expire = BlobGenerator('mysql', 'mysql-', self.value_size, start=self.num_items // 2, end=(self.num_items * 3 // 4 - 1))
        gen_delete = BlobGenerator('mysql', 'mysql-', self.value_size, start=self.num_items * 3 // 4, end=self.num_items)
        self._load_all_buckets(self.master, gen_load, "create", 0, 1, 0, True, batch_size=20000, pause_secs=5, timeout_secs=180)
        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])

        mutate_threads = []
        if(self.doc_ops is not None):
            if("update" in self.doc_ops):
                mutate_threads.append(self._async_load_all_buckets(self.master, gen_update, "update", 0, 1, 0, True, batch_size=20000))

            if("delete" in self.doc_ops):
                mutate_threads.append(self._async_load_all_buckets(self.master, gen_delete, "delete", 0, 1, 0, True, batch_size=20000))

            if("expire" in self.doc_ops):
                mutate_threads.append(self._async_load_all_buckets(self.master, gen_expire, "update", self.expire_time, 1, 0, True, batch_size=20000))

        first_backup_thread = Thread(target=self.shell.execute_cluster_backup,
                                     name="backup",
                                     args=(self.couchbase_login_info, self.backup_location, self.command_options))
        first_backup_thread.start()
        first_backup_thread.join()

        for t in mutate_threads:
            for task in t:
                task.result()

        kvs_before = {}
        for bucket in self.buckets:
            kvs_before[bucket.name] = bucket.kvs[1]
        self._all_buckets_delete(self.master)
        gc.collect()

        if self.default_bucket:
            self.cluster.create_default_bucket(self.master, self.bucket_size, self.num_replicas)
            self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="", num_replicas=self.num_replicas, bucket_size=self.bucket_size))
        self._create_sasl_buckets(self.master, self.sasl_buckets)
        self._create_standard_buckets(self.master, self.standard_buckets)

        for bucket in self.buckets:
            bucket.kvs[1] = kvs_before[bucket.name]
        del kvs_before
        gc.collect()
        bucket_names = [bucket.name for bucket in self.buckets]
        self.shell.restore_backupFile(self.couchbase_login_info, self.backup_location, bucket_names)
        time.sleep(self.expire_time) #system sleeps for expired items
        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
Example #20
    def LoadDuringBackup(self):
        """Backup the items during data loading is running.

        We first load a number of items. Then we start backup while loading another amount number of items into
        cluster as "noise" during the backup. During verification, we want to make sure that every item before backup
        starts can be restored correctly."""

        gen_load_backup = BlobGenerator('couchdb', 'couchdb', self.value_size, end=self.backup_items)
        self._load_all_buckets(self.master, gen_load_backup, "create", 0, 2, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
        #store items before backup starts to kvstores[2]
        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])

        gen_load = BlobGenerator('mysql', 'mysql-', self.value_size, end=self.num_items)
        data_load_thread = Thread(target=self._load_all_buckets,
                                  name="load_data",
                                  args=(self.master, gen_load, "create", 0, 1, 0, True))
        #store noise items during backup to kvstores[1]

        backup_thread = Thread(target=self.shell.execute_cluster_backup,
                               name="backup",
                               args=(self.couchbase_login_info, self.backup_location, self.command_options))

        backup_thread.start()
        data_load_thread.start()
        data_load_thread.join()
        backup_thread.join()
        # TODO: implement a mechanism to check backup progress to prevent backup_thread from hanging
        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])

        kvs_before = {}
        for bucket in self.buckets:
            kvs_before[bucket.name] = bucket.kvs[2]
        self._all_buckets_delete(self.master)
        gc.collect()

        if self.default_bucket:
            self.cluster.create_default_bucket(self.master, self.bucket_size, self.num_replicas)
            self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="", num_replicas=self.num_replicas, bucket_size=self.bucket_size))
        self._create_sasl_buckets(self.master, self.sasl_buckets)
        self._create_standard_buckets(self.master, self.standard_buckets)

        for bucket in self.buckets:
            bucket.kvs[2] = kvs_before[bucket.name]
        del kvs_before
        gc.collect()
        bucket_names = [bucket.name for bucket in self.buckets]
        self.shell.restore_backupFile(self.couchbase_login_info, self.backup_location, bucket_names)

        for bucket in self.buckets:
            del bucket.kvs[1]
        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
        self.verify_results(self.master, 2) #do verification only with kvstores[2]
Example #21
    def get_indices_as_buckets(self):
        buckets = []
        indices = self.get_indices()

        for index in indices:
            bucket = Bucket()
            stats = self.conn.indices.stats()['indices'][index]
            bucket.name = index
            bucket.type = "es"
            bucket.port = self.port
            bucket.authType = None
            bucket.saslPassword = self.password
            bucket.nodes = list()

            #vBucketServerMap
            bucketStats = BucketStats()
            bucketStats.itemCount = stats['primaries']['docs']['count']
            bucket.stats = bucketStats
            buckets.append(bucket)
            bucket.master_id = "es@" + self.ip

        return buckets
Example #22
    def CreateUpdateDeleteExpireBeforeBackup(self):
        """Backup up the buckets after operations: update, delete, expire.

        We load a number of items first and then load some extra items. We do update, delete, expire operation
        on those extra items. After these mutations, we backup all the items and restore them for verification """

        gen_load = BlobGenerator('mysql', 'mysql-', self.value_size, end=self.num_items)
        gen_extra = BlobGenerator('couchdb', 'couchdb-', self.value_size, end=self.num_mutate_items)
        self._load_all_buckets(self.master, gen_load, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
        extra_items_deleted_flag = 0

        if(self.doc_ops is not None):
            self._load_all_buckets(self.master, gen_extra, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
            if("update" in self.doc_ops):
                self._load_all_buckets(self.master, gen_extra, "update", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
            if("delete" in self.doc_ops):
                self._load_all_buckets(self.master, gen_extra, "delete", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
                extra_items_deleted_flag = 1
            if("expire" in self.doc_ops):
                if extra_items_deleted_flag == 1:
                    self._load_all_buckets(self.master, gen_extra, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
                self._load_all_buckets(self.master, gen_extra, "update", self.expire_time, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
        time.sleep(30)

        self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, self.command_options)

        kvs_before = {}
        for bucket in self.buckets:
            kvs_before[bucket.name] = bucket.kvs[1]
        self._all_buckets_delete(self.master)
        gc.collect()

        if self.default_bucket:
            default_params=self._create_bucket_params(server=self.master, size=self.bucket_size,
                                                             replicas=self.num_replicas)
            self.cluster.create_default_bucket(default_params)
            self.buckets.append(Bucket(name="default", num_replicas=self.num_replicas, bucket_size=self.bucket_size))
        self._create_standard_buckets(self.master, self.standard_buckets)

        for bucket in self.buckets:
            bucket.kvs[1] = kvs_before[bucket.name]
        del kvs_before
        gc.collect()
        bucket_names = [bucket.name for bucket in self.buckets]
        self.shell.restore_backupFile(self.couchbase_login_info, self.backup_location, bucket_names)
        time.sleep(self.expire_time) #system sleeps for expired items

        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
        self.verify_results(self.master)
        self._verify_stats_all_buckets(self.servers[:self.num_servers])
Example #23
    def get_indices_as_buckets(self, doc_type='couchbaseDocument'):
        buckets = []
        indices = self.get_indices()

        for index in indices:
            bucket = Bucket()
            q = query.MatchAllQuery()
            docs = self.conn.search(q, index, doc_type)
            bucket.name = index
            bucket.type = "es"
            bucket.port = self.port
            bucket.authType = None
            bucket.saslPassword = self.password
            bucket.nodes = list()

            #vBucketServerMap
            bucketStats = BucketStats()
            bucketStats.itemCount = docs.count()
            bucket.stats = bucketStats
            buckets.append(bucket)
            bucket.master_id = "es@" + self.ip

        return buckets
Example #24
    def CreateUpdateDeleteBeforeBackup(self):
        """Back up the buckets after doing docs operations: create, update, delete, recreate.

        We load 2 kinds of items into the cluster with different key value prefix. Then we do
        mutations on part of the items according to clients' input param. After backup, we
        delete the existing buckets then recreate them and restore all the buckets. We verify
        the results by comparison between the items in KVStore and restored buckets items."""

        gen_load_mysql = BlobGenerator('mysql', 'mysql-', self.value_size, end=(self.num_items // 2 - 1))
        gen_load_couchdb = BlobGenerator('couchdb', 'couchdb-', self.value_size, start=self.num_items // 2, end=self.num_items)
        gen_update = BlobGenerator('mysql', 'mysql-', self.value_size, end=(self.num_items // 2 - 1))
        gen_delete = BlobGenerator('couchdb', 'couchdb-', self.value_size, start=self.num_items // 2, end=self.num_items)
        gen_create = BlobGenerator('mysql', 'mysql-', self.value_size, start=self.num_items // 2 + 1, end=self.num_items * 3 // 2)
        self._load_all_buckets(self.master, gen_load_mysql, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
        self._load_all_buckets(self.master, gen_load_couchdb, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)

        if(self.doc_ops is not None):
            if("update" in self.doc_ops):
                self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
            if("create" in self.doc_ops):
                self._load_all_buckets(self.master, gen_create, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
            if("delete" in self.doc_ops):
                self._load_all_buckets(self.master, gen_delete, "delete", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])

        self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, self.command_options)

        kvs_before = {}
        for bucket in self.buckets:
            kvs_before[bucket.name] = bucket.kvs[1]
        bucket_names = [bucket.name for bucket in self.buckets]
        self._all_buckets_delete(self.master)
        gc.collect()

        if self.default_bucket:
            default_params=self._create_bucket_params(server=self.master, size=self.bucket_size,
                                                             replicas=self.num_replicas)
            self.cluster.create_default_bucket(default_params)
            self.buckets.append(Bucket(name="default", num_replicas=self.num_replicas, bucket_size=self.bucket_size))
        self._create_standard_buckets(self.master, self.standard_buckets)

        for bucket in self.buckets:
            bucket.kvs[1] = kvs_before[bucket.name]
        del kvs_before
        gc.collect()
        self.shell.restore_backupFile(self.couchbase_login_info, self.backup_location, bucket_names)

        self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
        self.verify_results(self.master)
        self._verify_stats_all_buckets(self.servers[:self.num_servers])
Example #25
    def _create_buckets_(self):
        if self.default_bucket:
            self._cluster_helper.create_default_bucket(self.master,
                                                       self.bucket_size,
                                                       self._num_replicas)
            self.buckets.append(
                Bucket(name="default",
                       authType="sasl",
                       saslPassword="",
                       num_replicas=self._num_replicas,
                       bucket_size=self.bucket_size))

        self._create_sasl_buckets(self.master, self._sasl_buckets)
        self._create_standard_buckets(self.master, self._standard_buckets)
Example #26
 def _create_sasl_buckets(self, server, num_buckets):
     bucket_tasks = []
     for i in range(num_buckets):
         name = 'bucket' + str(i)
         bucket_tasks.append(
             self.cluster.async_create_sasl_bucket(server, name, 'password',
                                                   self.bucket_size,
                                                   self.num_replicas))
         self.buckets.append(
             Bucket(name=name,
                    authType="sasl",
                    saslPassword='******',
                    num_replicas=self.num_replicas,
                    bucket_size=self.bucket_size))
     for task in bucket_tasks:
         task.result()
Example #27
    def get_indices_as_buckets(self):
        buckets = []
        indices = self.get_indices()

        for index in indices:
            bucket = Bucket()
            stats = self.conn.indices.stats()['indices'][index]
            bucket.name = index
            bucket.type = "es"
            bucket.port = self.port
            bucket.authType = None
            bucket.saslPassword = self.password
            bucket.nodes = list()

            #vBucketServerMap
            bucketStats = BucketStats()
            bucketStats.itemCount = stats['primaries']['docs']['count']
            bucket.stats = bucketStats
            buckets.append(bucket)
            bucket.master_id = "es@"+self.ip

        return buckets
Example #28
    def _create_standard_buckets(self, server, server_id, bucket_size):
        bucket_tasks = []
        for i in range(self._standard_buckets):
            name = "standard_bucket_" + str(i + 1)
            bucket_tasks.append(
                self._cluster_helper.async_create_standard_bucket(
                    server, name, 11214 + i, bucket_size, self._num_replicas))
            self._buckets.append(
                Bucket(name=name,
                       authType=None,
                       saslPassword=None,
                       num_replicas=self._num_replicas,
                       bucket_size=bucket_size,
                       master_id=server_id))

        for task in bucket_tasks:
            task.result()
Example #29
    def get_indices_as_buckets(self, doc_type='couchbaseDocument'):
        buckets = []
        indices = self.get_indices()

        for index in indices:
            bucket = Bucket()
            q = query.MatchAllQuery()
            docs = self.conn.search(q, index, doc_type)
            bucket.name = index
            bucket.type = "es"
            bucket.port = self.port
            bucket.authType = None
            bucket.saslPassword = self.password
            bucket.nodes = list()

            #vBucketServerMap
            bucketStats = BucketStats()
            bucketStats.itemCount = docs.count()
            bucket.stats = bucketStats
            buckets.append(bucket)
            bucket.master_id = "es@"+self.ip

        return buckets
Example #30
    def _create_standard_buckets(self, server, num_buckets):
        bucket_tasks = []
        for i in range(num_buckets):
            name = 'standard_bucket' + str(i)
            bucket_tasks.append(
                self.cluster.async_create_standard_bucket(
                    server, name, 11214 + i, self.bucket_size,
                    self.num_replicas))

            self.buckets.append(
                Bucket(name=name,
                       authType=None,
                       saslPassword=None,
                       num_replicas=self.num_replicas,
                       bucket_size=self.bucket_size,
                       port=11214 + i))
        for task in bucket_tasks:
            task.result()
Example #31
    def setUp(self):
        super(StatsRepro, self).setUp()
        self.timeout = 120
        self.bucket_name = self.input.param("bucket", "default")
        self.bucket_size = self.input.param("bucket_size", 100)
        self.data_size = self.input.param("data_size", 2048)
        self.threads_to_run = self.input.param("threads_to_run", 5)
#        self.nodes_in = int(self.input.param("nodes_in", 1))
#        self.servs_in = [self.servers[i + 1] for i in range(self.nodes_in)]
#        rebalance = self.cluster.async_rebalance(self.servers[:1], self.servs_in, [])
#        rebalance.result()
        bucket_params=self._create_bucket_params(server=self.servers[0], size=self.bucket_size, replicas=self.num_replicas)
        self.cluster.create_default_bucket(bucket_params)

        self.buckets.append(Bucket(name="default",
             num_replicas=self.num_replicas, bucket_size=self.bucket_size))
        rest = RestConnection(self.servers[0])
        self.nodes_server = rest.get_nodes()
Example #32
 def setUp(self):
     super(WarmUpTests, self).setUp()
     self.pre_warmup_stats = {}
     self.timeout = 120
     self.bucket_name = self.input.param("bucket", "default")
     self.bucket_size = self.input.param("bucket_size", 256)
     self.data_size = self.input.param("data_size", 2048)
     self.nodes_in = int(self.input.param("nodes_in", 1))
     self.access_log = self.input.param("access_log", False)
     self.servs_in = [self.servers[i + 1] for i in range(self.nodes_in)]
     rebalance = self.cluster.async_rebalance(self.servers[:1],
                                              self.servs_in, [])
     rebalance.result()
     self.cluster.create_default_bucket(self.servers[0], self.bucket_size,
                                        self.num_replicas)
     self.buckets.append(
         Bucket(name="default",
                authType="sasl",
                saslPassword="",
                num_replicas=self.num_replicas,
                bucket_size=self.bucket_size))