def _create_default_bucket(self):
     helper = RestHelper(self.rest)
     if not helper.bucket_exists(self.bucket):
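         # Size the bucket from the node's memory quota scaled by the per-bucket ratio, with a 256 MB floor.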
         node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
         info = self.rest.get_nodes_self()
         available_ram = int(info.memoryQuota * node_ram_ratio)
         if available_ram < 256:
             available_ram = 256
         self.rest.create_bucket(bucket=self.bucket, ramQuotaMB=available_ram)
         ready = BucketOperationHelper.wait_for_memcached(self.master, self.bucket)
         self.testcase.assertTrue(ready, "wait_for_memcached failed")
     self.testcase.assertTrue(helper.bucket_exists(self.bucket), "unable to create {0} bucket".format(self.bucket))
Example #2
 def _create_default_bucket(self, replica=1):
     name = "default"
     master = self.servers[0]
     rest = RestConnection(master)
     helper = RestHelper(RestConnection(master))
     if not helper.bucket_exists(name):
         node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
         info = rest.get_nodes_self()
         available_ram = info.memoryQuota * node_ram_ratio
         rest.create_bucket(bucket=name, ramQuotaMB=int(available_ram), replicaNumber=replica)
         ready = BucketOperationHelper.wait_for_memcached(master, name)
         self.assertTrue(ready, msg="wait_for_memcached failed")
     self.assertTrue(helper.bucket_exists(name), msg="unable to create {0} bucket".format(name))
Example #3
 def _create_default_bucket(self):
     name = "default"
     master = self.master
     rest = RestConnection(master)
     helper = RestHelper(RestConnection(master))
     if not helper.bucket_exists(name):
         node_ram_ratio = BucketOperationHelper.base_bucket_ratio(TestInputSingleton.input.servers)
         info = rest.get_nodes_self()
         available_ram = info.memoryQuota * node_ram_ratio
         rest.create_bucket(bucket=name, ramQuotaMB=int(available_ram))
         ready = BucketOperationHelper.wait_for_memcached(master, name)
         self.assertTrue(ready, msg="wait_for_memcached failed")
     self.assertTrue(helper.bucket_exists(name),
                     msg="unable to create {0} bucket".format(name))
Example #4
 def _create_default_bucket(self, replica=1):
     name = "default"
     master = self.servers[0]
     rest = RestConnection(master)
     helper = RestHelper(RestConnection(master))
     if not helper.bucket_exists(name):
         node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
         info = rest.get_nodes_self()
         available_ram = info.memoryQuota * node_ram_ratio
         rest.create_bucket(bucket=name, ramQuotaMB=int(available_ram), replicaNumber=replica,
                            storageBackend=self.bucket_storage)
         ready = BucketOperationHelper.wait_for_memcached(master, name)
         self.assertTrue(ready, msg="wait_for_memcached failed")
     self.assertTrue(helper.bucket_exists(name),
         msg="unable to create {0} bucket".format(name))
Example #5
 def _create_default_bucket(self, unittest):
     name = "default"
     master = self.master
     rest = RestConnection(master)
     helper = RestHelper(RestConnection(master))
     if not helper.bucket_exists(name):
         node_ram_ratio = BucketOperationHelper.base_bucket_ratio(TestInputSingleton.input.servers)
         info = rest.get_nodes_self()
         available_ram = info.memoryQuota * node_ram_ratio
         rest.create_bucket(bucket=name, ramQuotaMB=int(available_ram))
         ready = BucketOperationHelper.wait_for_memcached(master, name)
         BucketOperationHelper.wait_for_vbuckets_ready_state(master, name)
         unittest.assertTrue(ready, msg="wait_for_memcached failed")
     unittest.assertTrue(helper.bucket_exists(name),
                         msg="unable to create {0} bucket".format(name))
Example #6
 def _create_default_bucket(self):
     name = "default"
     master = self.servers[0]
     rest = RestConnection(master)
     helper = RestHelper(RestConnection(master))
     if not helper.bucket_exists(name):
         node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
         info = rest.get_nodes_self()
         available_ram = info.mcdMemoryReserved * node_ram_ratio
         rest.create_bucket(bucket=name, ramQuotaMB=int(available_ram))
         ready = BucketOperationHelper.wait_for_memcached(master, name)
         self.assertTrue(ready, msg="wait_for_memcached failed")
     self.assertTrue(helper.bucket_exists(name),
                     msg="unable to create {0} bucket".format(name))
     self.load_thread = None
     self.shutdown_load_data = False
Example #7
 def _create_default_bucket(self):
     rest = RestConnection(self.master)
     helper = RestHelper(RestConnection(self.master))
     if not helper.bucket_exists(self.bucket):
         node_ram_ratio = BucketOperationHelper.base_bucket_ratio([self.master])
         info = rest.get_nodes_self()
         available_ram = info.memoryQuota * node_ram_ratio
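         # Unlike the other variants, this one also initializes the cluster credentials and memory quota before creating the bucket.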
         serverInfo = self.master
         rest.init_cluster(username=serverInfo.rest_username,
                           password=serverInfo.rest_password)
         rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
         rest.create_bucket(bucket=self.bucket, ramQuotaMB=int(available_ram))
         ready = BucketOperationHelper.wait_for_memcached(self.master, self.bucket)
         self.assertTrue(ready, msg="wait_for_memcached failed")
     self.assertTrue(helper.bucket_exists(self.bucket),
                     msg="unable to create {0} bucket".format(self.bucket))
Example #8
 def _create_default_bucket(self, replica=1):
     name = "default"
     master = self.servers[0]
     rest = RestConnection(master)
     helper = RestHelper(RestConnection(master))
     if not helper.bucket_exists(name):
         node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
         info = rest.get_nodes_self()
         available_ram = info.memoryQuota * node_ram_ratio
         if available_ram < 256:
             available_ram = 256
         rest.create_bucket(bucket=name, ramQuotaMB=int(available_ram), replicaNumber=replica)
         msg = 'This not_my_vbucket error is part of wait_for_memcached method, not an issue'
         ready = BucketOperationHelper.wait_for_memcached(master, name, log_msg=msg)
         self.assertTrue(ready, msg="wait_for_memcached failed")
     self.assertTrue(helper.bucket_exists(name),
                     msg="unable to create {0} bucket".format(name))
Example #9
 def _create_default_bucket(self):
     name = "default"
     master = self.servers[0]
     rest = RestConnection(master)
     helper = RestHelper(RestConnection(master))
     if not helper.bucket_exists(name):
         node_ram_ratio = BucketOperationHelper.base_bucket_ratio(
             self.servers)
         info = rest.get_nodes_self()
         available_ram = info.mcdMemoryReserved * node_ram_ratio
         rest.create_bucket(bucket=name, ramQuotaMB=int(available_ram))
         ready = BucketOperationHelper.wait_for_memcached(master, name)
         self.assertTrue(ready, msg="wait_for_memcached failed")
     self.assertTrue(helper.bucket_exists(name),
                     msg="unable to create {0} bucket".format(name))
     self.load_thread = None
     self.shutdown_load_data = False
Example #10
 def _create_default_bucket(self):
     helper = RestHelper(self.rest)
     if not helper.bucket_exists(self.bucket):
         node_ram_ratio = BucketOperationHelper.base_bucket_ratio(
             self.servers)
         info = self.rest.get_nodes_self()
         available_ram = int(info.memoryQuota * node_ram_ratio)
         if available_ram < 256:
             available_ram = 256
         self.rest.create_bucket(bucket=self.bucket,
                                 ramQuotaMB=available_ram)
         ready = BucketOperationHelper.wait_for_memcached(self.master,
                                                          self.bucket)
         self.testcase.assertTrue(ready, "wait_for_memcached failed")
     self.testcase.assertTrue(
         helper.bucket_exists(self.bucket),
         "unable to create {0} bucket".format(self.bucket))
Example #11
 def backup_restore(self):
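     # Resolve the --start/--end backup names from the backupset indexes; an out-of-range
     # index falls back to a name synthesized from the last backup plus the index.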
     try:
         backup_start = self.backups[int(self.backupset.start) - 1]
     except IndexError:
         backup_start = "{0}{1}".format(self.backups[-1], self.backupset.start)
     try:
         backup_end = self.backups[int(self.backupset.end) - 1]
     except IndexError:
         backup_end = "{0}{1}".format(self.backups[-1], self.backupset.end)
     args = "restore --archive {0} --repo {1} --host http://{2}:{3} --username {4} --password {5} --start {6} " \
            "--end {7}".format(self.backupset.directory, self.backupset.name,
                                               self.backupset.restore_cluster_host.ip,
                                               self.backupset.restore_cluster_host.port,
                                               self.backupset.restore_cluster_host_username,
                                               self.backupset.restore_cluster_host_password, backup_start,
                                               backup_end)
     if self.backupset.exclude_buckets:
         args += " --exclude-buckets {0}".format(self.backupset.exclude_buckets)
     if self.backupset.include_buckets:
         args += " --include-buckets {0}".format(self.backupset.include_buckets)
     if self.backupset.disable_bucket_config:
         args += " --disable-bucket-config {0}".format(self.backupset.disable_bucket_config)
     if self.backupset.disable_views:
         args += " --disable-views {0}".format(self.backupset.disable_views)
     if self.backupset.disable_gsi_indexes:
         args += " --disable-gsi-indexes {0}".format(self.backupset.disable_gsi_indexes)
     if self.backupset.disable_ft_indexes:
         args += " --disable-ft-indexes {0}".format(self.backupset.disable_ft_indexes)
     if self.backupset.disable_data:
         args += " --disable-data {0}".format(self.backupset.disable_data)
     if self.backupset.filter_keys:
         args += " --filter_keys {0}".format(self.backupset.filter_keys)
     if self.backupset.filter_values:
         args += " --filter_values {0}".format(self.backupset.filter_values)
     if self.backupset.force_updates:
         args += " --force-updates"
     if self.no_progress_bar:
         args += " --no-progress-bar"
     if not self.skip_buckets:
         rest_conn = RestConnection(self.backupset.restore_cluster_host)
         rest_helper = RestHelper(rest_conn)
         for bucket in self.buckets:
             if not rest_helper.bucket_exists(bucket.name):
                 self.log.info("Creating bucket {0} in restore host {1}".format(bucket.name,
                                                                                self.backupset.restore_cluster_host.ip))
                 rest_conn.create_bucket(bucket=bucket.name,
                                         ramQuotaMB=512,
                                         authType=bucket.authType if bucket.authType else 'none',
                                         proxyPort=bucket.port,
                                         saslPassword=bucket.saslPassword)
                 bucket_ready = rest_helper.vbucket_map_ready(bucket.name)
                 if not bucket_ready:
                     self.fail("Bucket %s not created after 120 seconds." % bucket.name)
     remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
     command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, args)
     output, error = remote_client.execute_command(command)
     remote_client.log_command_output(output, error)
     return output, error
Example #12
 def wait_for_bucket_creation(bucket, rest, timeout_in_seconds=120):
     log.info("waiting for bucket creation to complete....")
     start = time.time()
     helper = RestHelper(rest)
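     # Poll bucket_exists() every 2 seconds until the bucket appears or the timeout elapses.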
     while (time.time() - start) <= timeout_in_seconds:
         if helper.bucket_exists(bucket):
             return True
         else:
             time.sleep(2)
     return False
Example #13
 def wait_for_bucket_creation(bucket, rest, timeout_in_seconds=120):
     log.info('waiting for bucket creation to complete....')
     start = time.time()
     helper = RestHelper(rest)
     while (time.time() - start) <= timeout_in_seconds:
         if helper.bucket_exists(bucket):
             return True
         else:
             time.sleep(2)
     return False
Example #14
 def wait_for_bucket_deletion(bucket, rest, timeout_in_seconds=120):
     log = logger.Logger.get_logger()
     log.info('waiting for bucket deletion to complete....')
     start = time.time()
     helper = RestHelper(rest)
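     # Poll every 0.1 seconds until the bucket is gone or the timeout elapses.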
     while (time.time() - start) <= timeout_in_seconds:
         if not helper.bucket_exists(bucket):
             return True
         else:
             time.sleep(0.1)
     return False
Example #15
 def wait_for_bucket_deletion(bucket,
                              rest,
                              timeout_in_seconds=120):
     log = logger.Logger.get_logger()
     log.info('waiting for bucket deletion to complete....')
     start = time.time()
     helper = RestHelper(rest)
     while (time.time() - start) <= timeout_in_seconds:
         if not helper.bucket_exists(bucket):
             return True
         else:
             time.sleep(2)
     return False
Example #16
    def _create_bucket(self, lww=True, drift=False, name=None):

        if lww:
            self.lww = lww

        if name:
            self.bucket = name

        helper = RestHelper(self.rest)
        if not helper.bucket_exists(self.bucket):
            node_ram_ratio = BucketOperationHelper.base_bucket_ratio(
                self.servers)
            info = self.rest.get_nodes_self()
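            # Create the bucket with last-write-wins (lww) conflict resolution and a fixed 512 MB quota.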
            self.rest.create_bucket(bucket=self.bucket,
                ramQuotaMB=512, authType='sasl', lww=self.lww)
            try:
                ready = BucketOperationHelper.wait_for_memcached(self.master,
                    self.bucket)
            except Exception as e:
                self.fail('unable to create bucket')
Example #17
class NewUpgradeBaseTest(BaseTestCase):

    def setUp(self):
        super(NewUpgradeBaseTest, self).setUp()
        self.product = self.input.param('product', 'couchbase-server')
        self.initial_version = self.input.param('initial_version', '1.8.1-942-rel')
        self.initial_vbuckets = self.input.param('initial_vbuckets', 64)
        self.rest_settings = self.input.membase_settings
        self.rest = RestConnection(self.master)
        self.rest_helper = RestHelper(self.rest)
        self.sleep_time = 10
        self.data_size = self.input.param('data_size', 1024)
        self.op_types = self.input.param('op_types', 'bucket')
        self.item_flag = self.input.param('item_flag', 4042322160)
        self.expire_time = self.input.param('expire_time', 0)

    def tearDown(self):
        super(NewUpgradeBaseTest, self).tearDown()

    def _install(self, servers):
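        # Install the requested product/version on all servers in parallel, then verify Couchbase is present on each node.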
        params = {}
        params['num_nodes'] = len(servers)
        params['product'] = self.product
        params['version'] = self.initial_version
        params['vbuckets'] = [self.initial_vbuckets]
        InstallerJob().parallel_install(servers, params)
        if self.product in ["couchbase", "couchbase-server", "cb"]:
            success = True
            for server in servers:
                success &= RemoteMachineShellConnection(server).is_couchbase_installed()
                if not success:
                    self.log.info("some nodes were not install successfully!")
                    sys.exit(1)

    def operations(self, multi_nodes=False):
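        # Initialize the nodes, create the configured buckets, optionally rebalance in the remaining servers, and load data if requested.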
        self.quota = self._initialize_nodes(self.cluster, self.servers, self.disabled_consistent_view)
        self.buckets = []
        gc.collect()
        if self.total_buckets > 0:
            self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)

        if self.default_bucket:
            self.cluster.create_default_bucket(self.master, self.bucket_size, self.num_replicas)
            self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="",
                                       num_replicas=self.num_replicas, bucket_size=self.bucket_size))

        self._create_sasl_buckets(self.master, self.sasl_buckets)
        self._create_standard_buckets(self.master, self.standard_buckets)
        if multi_nodes:
            servers_in = [self.servers[i+1] for i in range(self.initial_num_servers-1)]
            self.cluster.rebalance(self.servers[:1], servers_in, [])
        if self.op_types == "data":
            self._load_data_all_buckets("create")
            if multi_nodes:
                self._wait_for_stats_all_buckets(self.servers[:self.initial_num_servers])
            else:
                self._wait_for_stats_all_buckets([self.master])

    def _load_data_all_buckets(self, op_type='create', start=0):
        loaded = False
        count = 0
        gen_load = BlobGenerator('upgrade-', 'upgrade-', self.data_size, start=start, end=self.num_items)
        while not loaded and count < 60:
            try :
                self._load_all_buckets(self.master, gen_load, op_type, self.expire_time, 1,
                                       self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
                loaded = True
            except MemcachedError as error:
                if error.status == 134:
                    loaded = False
                    self.log.error("Memcached error 134, wait for 5 seconds and then try again")
                    count += 1
                    time.sleep(self.sleep_time)

    def _get_build(self, server, version, remote, is_amazon=False):
        info = remote.extract_remote_info()
        builds, changes = BuildQuery().get_all_builds()
        self.log.info("finding build %s for machine %s" % (version, server))
        result = re.search('r', version)

        if result is None:
            appropriate_build = BuildQuery().\
                find_membase_release_build('%s-enterprise' % (self.product), info.deliverable_type,
                                           info.architecture_type, version.strip(), is_amazon=is_amazon)
        else:
            appropriate_build = BuildQuery().\
                find_membase_build(builds, '%s-enterprise' % (self.product), info.deliverable_type,
                                   info.architecture_type, version.strip(), is_amazon=is_amazon)

        return appropriate_build

    def _upgrade(self, upgrade_version, server, remote):
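        # Download and install the target build, wait for ns_server to come back up, then re-initialize the cluster port.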
        appropriate_build = self._get_build(server, upgrade_version, remote)
        self.assertTrue(appropriate_build.url, msg="unable to find build {0}".format(upgrade_version))
        remote.download_build(appropriate_build)
        remote.membase_upgrade(appropriate_build, save_upgrade_config=False)
        self.rest_helper.is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
        self.rest.init_cluster_port(self.rest_settings.rest_username, self.rest_settings.rest_password)
        time.sleep(self.sleep_time)

    def verification(self, multi_nodes=False):
        for bucket in self.buckets:
            if not self.rest_helper.bucket_exists(bucket.name):
                raise Exception("bucket:- %s not found" % bucket.name)
            if self.op_types == "bucket":
                bucketinfo = self.rest.get_bucket(bucket.name)
                self.log.info("bucket info :- %s" % bucketinfo)

        if self.op_types == "data":
            if multi_nodes:
                self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
                self._verify_all_buckets(self.master, 1, self.wait_timeout*50, self.max_verify, True, 1)
                self._verify_stats_all_buckets(self.servers[:self.num_servers])
            else:
                self._wait_for_stats_all_buckets([self.master])
                self._verify_all_buckets(self.master, 1, self.wait_timeout*50, self.max_verify, True, 1)
                self._verify_stats_all_buckets([self.master])
Example #18
 def backup_restore(self):
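     # Variant of backup_restore that passes the cluster flag form of --host, supports
     # --disable-conf-res-restriction, and recreates buckets with the new lww setting before restoring.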
     try:
         backup_start = self.backups[int(self.backupset.start) - 1]
     except IndexError:
         backup_start = "{0}{1}".format(self.backups[-1],
                                        self.backupset.start)
     try:
         backup_end = self.backups[int(self.backupset.end) - 1]
     except IndexError:
         backup_end = "{0}{1}".format(self.backups[-1], self.backupset.end)
     args = "restore --archive {0} --repo {1} {2} http://{3}:{4} --username {5} "\
            "--password {6} --start {7} --end {8}" \
                            .format(self.backupset.directory, self.backupset.name,
                                    self.cluster_flag, self.backupset.restore_cluster_host.ip,
                                    self.backupset.restore_cluster_host.port,
                                    self.backupset.restore_cluster_host_username,
                                    self.backupset.restore_cluster_host_password,
                                    backup_start, backup_end)
     if self.backupset.exclude_buckets:
         args += " --exclude-buckets {0}".format(
             self.backupset.exclude_buckets)
     if self.backupset.include_buckets:
         args += " --include-buckets {0}".format(
             self.backupset.include_buckets)
     if self.backupset.disable_bucket_config:
         args += " --disable-bucket-config {0}".format(
             self.backupset.disable_bucket_config)
     if self.backupset.disable_views:
         args += " --disable-views {0}".format(self.backupset.disable_views)
     if self.backupset.disable_gsi_indexes:
         args += " --disable-gsi-indexes {0}".format(
             self.backupset.disable_gsi_indexes)
     if self.backupset.disable_ft_indexes:
         args += " --disable-ft-indexes {0}".format(
             self.backupset.disable_ft_indexes)
     if self.backupset.disable_data:
         args += " --disable-data {0}".format(self.backupset.disable_data)
     if self.backupset.disable_conf_res_restriction is not None:
         args += " --disable-conf-res-restriction {0}".format(
             self.backupset.disable_conf_res_restriction)
     if self.backupset.filter_keys:
         args += " --filter_keys {0}".format(self.backupset.filter_keys)
     if self.backupset.filter_values:
         args += " --filter_values {0}".format(self.backupset.filter_values)
     if self.backupset.force_updates:
         args += " --force-updates"
     if self.no_progress_bar:
         args += " --no-progress-bar"
     if not self.skip_buckets:
         rest_conn = RestConnection(self.backupset.restore_cluster_host)
         rest_helper = RestHelper(rest_conn)
         for bucket in self.buckets:
             if not rest_helper.bucket_exists(bucket.name):
                 self.log.info(
                     "Creating bucket {0} in restore host {1}".format(
                         bucket.name,
                         self.backupset.restore_cluster_host.ip))
                 rest_conn.create_bucket(bucket=bucket.name,
                                         ramQuotaMB=512,
                                         authType=bucket.authType
                                         if bucket.authType else 'none',
                                         proxyPort=bucket.port,
                                         saslPassword=bucket.saslPassword,
                                         lww=self.lww_new)
                 bucket_ready = rest_helper.vbucket_map_ready(bucket.name)
                 if not bucket_ready:
                     self.fail("Bucket %s not created after 120 seconds." %
                               bucket.name)
     remote_client = RemoteMachineShellConnection(
         self.backupset.backup_host)
     command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, args)
     output, error = remote_client.execute_command(command)
     remote_client.log_command_output(output, error)
     res = output
     res.extend(error)
     error_str = "Error restoring cluster: Transfer failed. Check the logs for more information."
     if error_str in res:
         command = "cat " + self.backupset.directory + "/logs/backup.log | grep '" + error_str + "' -A 10 -B 100"
         output, error = remote_client.execute_command(command)
         remote_client.log_command_output(output, error)
     if 'Required Flags:' in res:
         self.fail("Command line failed. Please check test params.")
     return output, error
Example #19
class PerfBase(unittest.TestCase):
    """
    specURL = http://hub.internal.couchbase.org/confluence/display/cbit/Black+Box+Performance+Test+Matrix

    """

    # The setUpBaseX() methods allow subclasses to resequence the setUp() and
    # skip cluster configuration.
    def setUpBase0(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.vbucket_count = PerfDefaults.vbuckets
        self.sc = None
        if self.parami("tear_down_on_setup",
                       PerfDefaults.tear_down_on_setup) == 1:
            self.tearDown()  # Tear down in case previous run had unclean death
        master = self.input.servers[0]
        self.set_up_rest(master)

    def setUpBase1(self):
        if self.parami('num_buckets', 1) > 1:
            bucket = 'bucket-0'
        else:
            bucket = self.param('bucket', 'default')
        vBuckets = self.rest.get_vbuckets(bucket)
        self.vbucket_count = len(vBuckets)

    def setUp(self):
        self.setUpBase0()

        master = self.input.servers[0]

        self.is_multi_node = False
        self.data_path = master.data_path

        # Number of items loaded by load() method.
        # Does not include or count any items that came from set_up_dgm().
        #
        self.num_items_loaded = 0

        if self.input.clusters:
            for cluster in self.input.clusters.values():
                master = cluster[0]
                self.set_up_rest(master)
                self.set_up_cluster(master)
        else:
            master = self.input.servers[0]
            self.set_up_cluster(master)

        # Rebalance
        num_nodes = self.parami("num_nodes", 10)
        self.rebalance_nodes(num_nodes)

        if self.input.clusters:
            for cluster in self.input.clusters.values():
                master = cluster[0]
                self.set_up_rest(master)
                self.set_up_buckets()
        else:
            self.set_up_buckets()

        self.set_up_proxy()

        if self.input.clusters:
            for cluster in self.input.clusters.values():
                master = cluster[0]
                self.set_up_rest(master)
                self.reconfigure()
        else:
            self.reconfigure()

        if self.parami("dgm", getattr(self, "dgm", 1)):
            self.set_up_dgm()

        time.sleep(10)
        self.setUpBase1()

        if self.input.clusters:
            for cluster in self.input.clusters.values():
                self.wait_until_warmed_up(cluster[0])
        else:
            self.wait_until_warmed_up()
        ClusterOperationHelper.flush_os_caches(self.input.servers)

    def set_up_rest(self, master):
        self.rest = RestConnection(master)
        self.rest_helper = RestHelper(self.rest)

    def set_up_cluster(self, master):
        """Initialize cluster"""

        print "[perf.setUp] Setting up cluster"

        self.rest.init_cluster(master.rest_username, master.rest_password)

        memory_quota = self.parami('mem_quota', PerfDefaults.mem_quota)
        self.rest.init_cluster_memoryQuota(master.rest_username,
                                           master.rest_password,
                                           memoryQuota=memory_quota)

    def set_up_buckets(self):
        """Set up data bucket(s)"""

        print "[perf.setUp] Setting up buckets"

        num_buckets = self.parami('num_buckets', 1)
        if num_buckets > 1:
            self.buckets = ['bucket-{0}'.format(i) for i in range(num_buckets)]
        else:
            self.buckets = [self.param('bucket', 'default')]

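        # The configured memory quota is split evenly across the buckets; each bucket is
        # created with SASL auth and verified via vbucket_map_ready and bucket_exists.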
        for bucket in self.buckets:
            bucket_ram_quota = self.parami('mem_quota', PerfDefaults.mem_quota)
            bucket_ram_quota = bucket_ram_quota / num_buckets
            replicas = self.parami('replicas', getattr(self, 'replicas', 1))

            self.rest.create_bucket(bucket=bucket,
                                    ramQuotaMB=bucket_ram_quota,
                                    replicaNumber=replicas,
                                    authType='sasl')

            status = self.rest_helper.vbucket_map_ready(bucket, 60)
            self.assertTrue(status, msg='vbucket_map not ready .. timed out')
            status = self.rest_helper.bucket_exists(bucket)
            self.assertTrue(status,
                            msg='unable to create {0} bucket'.format(bucket))

    def reconfigure(self):
        """Customize basic Couchbase setup"""

        print "[perf.setUp] Customizing setup"

        self.set_loglevel()
        self.set_max_concurrent_reps_per_doc()
        self.set_autocompaction()

    def set_loglevel(self):
        """Set custom loglevel"""

        loglevel = self.param('loglevel', None)
        if loglevel:
            self.rest.set_global_loglevel(loglevel)

    def set_max_concurrent_reps_per_doc(self):
        """Set custom MAX_CONCURRENT_REPS_PER_DOC"""

        max_concurrent_reps_per_doc = self.param('max_concurrent_reps_per_doc',
                                                 None)
        if max_concurrent_reps_per_doc:
            for server in self.input.servers:
                rc = RemoteMachineShellConnection(server)
                rc.set_environment_variable('MAX_CONCURRENT_REPS_PER_DOC',
                                            max_concurrent_reps_per_doc)

    def set_ep_compaction(self, comp_ratio):
        """Set up ep_engine side compaction ratio"""
        for server in self.input.servers:
            shell = RemoteMachineShellConnection(server)
            cmd = "/opt/couchbase/bin/cbepctl localhost:11210 "\
                  "set flush_param db_frag_threshold {0}".format(comp_ratio)
            self._exec_and_log(shell, cmd)
            shell.disconnect()

    def set_autocompaction(self, disable_view_compaction=False):
        """Set custom auto-compaction settings"""

        try:
            # Parallel database and view compaction
            parallel_compaction = self.param("parallel_compaction",
                                             PerfDefaults.parallel_compaction)
            # Database fragmentation threshold
            db_compaction = self.parami("db_compaction",
                                        PerfDefaults.db_compaction)
            print "[perf.setUp] database compaction = %d" % db_compaction

            # ep_engine fragmentation threshold
            ep_compaction = self.parami("ep_compaction",
                                        PerfDefaults.ep_compaction)
            self.set_ep_compaction(ep_compaction)
            print "[perf.setUp] ep_engine compaction = %d" % ep_compaction

            # View fragmentation threshold
            if disable_view_compaction:
                view_compaction = 100
            else:
                view_compaction = self.parami("view_compaction",
                                              PerfDefaults.view_compaction)
            # Set custom auto-compaction settings
            self.rest.set_auto_compaction(
                parallelDBAndVC=parallel_compaction,
                dbFragmentThresholdPercentage=db_compaction,
                viewFragmntThresholdPercentage=view_compaction)
        except Exception as e:
            # It's very hard to determine what exception it can raise.
            # Therefore we have to use general handler.
            print "ERROR while changing compaction settings: {0}".format(e)

    def tearDown(self):
        if self.parami("tear_down", 0) == 1:
            print "[perf.tearDown] tearDown routine skipped"
            return

        print "[perf.tearDown] tearDown routine starts"

        if self.parami("tear_down_proxy", 1) == 1:
            self.tear_down_proxy()
        else:
            print "[perf.tearDown] Proxy tearDown skipped"

        if self.sc is not None:
            self.sc.stop()
            self.sc = None

        if self.parami("tear_down_bucket", 0) == 1:
            self.tear_down_buckets()
        else:
            print "[perf.tearDown] Bucket tearDown skipped"

        if self.parami("tear_down_cluster", 1) == 1:
            self.tear_down_cluster()
        else:
            print "[perf.tearDown] Cluster tearDown skipped"

        print "[perf.tearDown] tearDown routine finished"

    def tear_down_buckets(self):
        print "[perf.tearDown] Tearing down bucket"
        BucketOperationHelper.delete_all_buckets_or_assert(
            self.input.servers, self)
        print "[perf.tearDown] Bucket teared down"

    def tear_down_cluster(self):
        print "[perf.tearDown] Tearing down cluster"
        ClusterOperationHelper.cleanup_cluster(self.input.servers)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(
            self.input.servers, self)
        print "[perf.tearDown] Cluster teared down"

    def set_up_proxy(self, bucket=None):
        """Set up and start Moxi"""

        if self.input.moxis:
            print '[perf.setUp] Setting up proxy'

            bucket = bucket or self.param('bucket', 'default')

            shell = RemoteMachineShellConnection(self.input.moxis[0])
            shell.start_moxi(self.input.servers[0].ip, bucket,
                             self.input.moxis[0].port)
            shell.disconnect()

    def tear_down_proxy(self):
        if len(self.input.moxis) > 0:
            shell = RemoteMachineShellConnection(self.input.moxis[0])
            shell.stop_moxi()
            shell.disconnect()

    # Returns "host:port" of moxi to hit.
    def target_host_port(self, bucket='default', use_direct=False):
        rv = self.param('moxi', None)
        if use_direct:
            return "%s:%s" % (self.input.servers[0].ip, '11210')
        if rv:
            return rv
        if len(self.input.moxis) > 0:
            return "%s:%s" % (self.input.moxis[0].ip, self.input.moxis[0].port)
        return "%s:%s" % (self.input.servers[0].ip,
                          self.rest.get_bucket(bucket).nodes[0].moxi)

    def protocol_parse(self, protocol_in, use_direct=False):
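        # Split a spec like 'membase-binary://user:pswd@host:port' into (protocol, host_port, user, pswd);
        # bare protocol names fall back to 'memcached-<name>' against the default moxi/direct target.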
        if protocol_in.find('://') >= 0:
            if protocol_in.find("couchbase:") >= 0:
                protocol = "couchbase"
            else:
                protocol = \
                    '-'.join(((["membase"] +
                    protocol_in.split("://"))[-2] + "-binary").split('-')[0:2])
            host_port = ('@' + protocol_in.split("://")[-1]).split('@')[-1]
            user, pswd = (('@' + protocol_in.split("://")[-1]).split('@')[-2] +
                          ":").split(':')[0:2]
        else:
            protocol = 'memcached-' + protocol_in
            host_port = self.target_host_port(use_direct=use_direct)
            user = self.param("rest_username", "Administrator")
            pswd = self.param("rest_password", "password")
        return protocol, host_port, user, pswd

    def mk_protocol(self, host, port='8091', prefix='membase-binary'):
        return self.param('protocol', prefix + '://' + host + ':' + port)

    def restartProxy(self, bucket=None):
        self.tear_down_proxy()
        self.set_up_proxy(bucket)

    def set_up_dgm(self):
        """Download fragmented, DGM dataset onto each cluster node, if not
        already locally available.

        The number of vbuckets and database schema must match the
        target cluster.

        Shutdown all cluster nodes.

        Do a cluster-restore.

        Restart all cluster nodes."""

        bucket = self.param("bucket", "default")
        ClusterOperationHelper.stop_cluster(self.input.servers)
        for server in self.input.servers:
            remote = RemoteMachineShellConnection(server)
            #TODO: Better way to pass num_nodes and db_size?
            self.get_data_files(remote, bucket, 1, 10)
            remote.disconnect()
        ClusterOperationHelper.start_cluster(self.input.servers)

    def get_data_files(self, remote, bucket, num_nodes, db_size):
        base = 'https://s3.amazonaws.com/database-analysis'
        dir = '/tmp/'
        if remote.is_couchbase_installed():
            dir = dir + '/couchbase/{0}-{1}-{2}/'.format(
                num_nodes, 256, db_size)
            output, error = remote.execute_command('mkdir -p {0}'.format(dir))
            remote.log_command_output(output, error)
            file = '{0}_cb.tar.gz'.format(bucket)
            base_url = base + '/couchbase/{0}-{1}-{2}/{3}'.format(
                num_nodes, 256, db_size, file)
        else:
            dir = dir + '/membase/{0}-{1}-{2}/'.format(num_nodes, 1024,
                                                       db_size)
            output, error = remote.execute_command('mkdir -p {0}'.format(dir))
            remote.log_command_output(output, error)
            file = '{0}_mb.tar.gz'.format(bucket)
            base_url = base + '/membase/{0}-{1}-{2}/{3}'.format(
                num_nodes, 1024, db_size, file)

        info = remote.extract_remote_info()
        wget_command = 'wget'
        if info.type.lower() == 'windows':
            wget_command = \
                "cd {0} ;cmd /c 'c:\\automation\\wget.exe --no-check-certificate"\
                .format(dir)

        # Download the gzipped data file if it does not already exist on the remote
        # server; it is extracted into the data directory below.
        exist = remote.file_exists(dir, file)
        if not exist:
            additional_quote = ""
            if info.type.lower() == 'windows':
                additional_quote = "'"
            command = "{0} -v -O {1}{2} {3} {4} ".format(
                wget_command, dir, file, base_url, additional_quote)
            output, error = remote.execute_command(command)
            remote.log_command_output(output, error)

        if remote.is_couchbase_installed():
            if info.type.lower() == 'windows':
                destination_folder = testconstants.WIN_COUCHBASE_DATA_PATH
            else:
                destination_folder = testconstants.COUCHBASE_DATA_PATH
        else:
            if info.type.lower() == 'windows':
                destination_folder = testconstants.WIN_MEMBASE_DATA_PATH
            else:
                destination_folder = testconstants.MEMBASE_DATA_PATH
        if self.data_path:
            destination_folder = self.data_path
        untar_command = 'cd {1}; tar -xzf {0}'.format(dir + file,
                                                      destination_folder)
        output, error = remote.execute_command(untar_command)
        remote.log_command_output(output, error)

    def _exec_and_log(self, shell, cmd):
        """helper method to execute a command and log output"""
        if not cmd or not shell:
            return

        output, error = shell.execute_command(cmd)
        shell.log_command_output(output, error)

    def _build_tar_name(self,
                        bucket,
                        version="unknown_version",
                        file_base=None):
        """build tar file name.

        {file_base}-{version}-{bucket}.tar.gz
        """
        if not file_base:
            file_base = os.path.splitext(
                os.path.basename(
                    self.param("conf_file", PerfDefaults.conf_file)))[0]
        return "{0}-{1}-{2}.tar.gz".format(file_base, version, bucket)

    def _save_snapshot(self, server, bucket, file_base=None):
        """Save data files to a snapshot"""

        src_data_path = os.path.dirname(server.data_path
                                        or testconstants.COUCHBASE_DATA_PATH)
        dest_data_path = "{0}-snapshots".format(src_data_path)

        print "[perf: _save_snapshot] server = {0} , src_data_path = {1}, dest_data_path = {2}"\
            .format(server.ip, src_data_path, dest_data_path)

        shell = RemoteMachineShellConnection(server)

        build_name, short_version, full_version = \
            shell.find_build_version("/opt/couchbase/", "VERSION.txt", "cb")

        dest_file = self._build_tar_name(bucket, full_version, file_base)

        self._exec_and_log(shell, "mkdir -p {0}".format(dest_data_path))

        # save as a gzip file; if the file already exists, overwrite it
        # TODO: multiple buckets
        zip_cmd = "cd {0}; tar -cvzf {1}/{2} {3} {3}-data _*"\
            .format(src_data_path, dest_data_path, dest_file, bucket)
        self._exec_and_log(shell, zip_cmd)

        shell.disconnect()
        return True

    def _load_snapshot(self, server, bucket, file_base=None, overwrite=True):
        """Load data files from a snapshot"""

        dest_data_path = os.path.dirname(server.data_path
                                         or testconstants.COUCHBASE_DATA_PATH)
        src_data_path = "{0}-snapshots".format(dest_data_path)

        print "[perf: _load_snapshot] server = {0} , src_data_path = {1}, dest_data_path = {2}"\
            .format(server.ip, src_data_path, dest_data_path)

        shell = RemoteMachineShellConnection(server)

        build_name, short_version, full_version = \
            shell.find_build_version("/opt/couchbase/", "VERSION.txt", "cb")

        src_file = self._build_tar_name(bucket, full_version, file_base)

        if not shell.file_exists(src_data_path, src_file):
            print "[perf: _load_snapshot] file '{0}/{1}' does not exist"\
                .format(src_data_path, src_file)
            shell.disconnect()
            return False

        if not overwrite:
            self._save_snapshot(server, bucket, "{0}.tar.gz".format(
                time.strftime(PerfDefaults.strftime)))  # TODO: filename

        rm_cmd = "rm -rf {0}/{1} {0}/{1}-data {0}/_*".format(
            dest_data_path, bucket)
        self._exec_and_log(shell, rm_cmd)

        unzip_cmd = "cd {0}; tar -xvzf {1}/{2}".format(dest_data_path,
                                                       src_data_path, src_file)
        self._exec_and_log(shell, unzip_cmd)

        shell.disconnect()
        return True

    def save_snapshots(self, file_base, bucket):
        """Save snapshots on all servers"""
        if not self.input.servers or not bucket:
            print "[perf: save_snapshot] invalid server list or bucket name"
            return False

        ClusterOperationHelper.stop_cluster(self.input.servers)

        for server in self.input.servers:
            self._save_snapshot(server, bucket, file_base)

        ClusterOperationHelper.start_cluster(self.input.servers)

        return True

    def load_snapshots(self, file_base, bucket):
        """Load snapshots on all servers"""
        if not self.input.servers or not bucket:
            print "[perf: load_snapshot] invalid server list or bucket name"
            return False

        ClusterOperationHelper.stop_cluster(self.input.servers)

        for server in self.input.servers:
            if not self._load_snapshot(server, bucket, file_base):
                ClusterOperationHelper.start_cluster(self.input.servers)
                return False

        ClusterOperationHelper.start_cluster(self.input.servers)

        return True

    def spec(self, reference):
        self.spec_reference = self.param("spec", reference)
        self.log.info("spec: " + reference)

    def mk_stats(self, verbosity):
        return StatsCollector(verbosity)

    def _get_src_version(self):
        """get testrunner version"""
        try:
            result = subprocess.Popen(['git', 'rev-parse', 'HEAD'],
                                      stdout=subprocess.PIPE).communicate()[0]
        except subprocess.CalledProcessError as e:
            print "[perf] unable to get src code version : {0}".format(str(e))
            return "unknown version"
        return result.rstrip()[:7]

    def start_stats(self,
                    stats_spec,
                    servers=None,
                    process_names=['memcached', 'beam.smp', 'couchjs'],
                    test_params=None,
                    client_id='',
                    collect_server_stats=True,
                    ddoc=None):
        if self.parami('stats', 1) == 0:
            return None

        servers = servers or self.input.servers
        sc = self.mk_stats(False)
        bucket = self.param("bucket", "default")
        sc.start(servers,
                 bucket,
                 process_names,
                 stats_spec,
                 10,
                 client_id,
                 collect_server_stats=collect_server_stats,
                 ddoc=ddoc)
        test_params['testrunner'] = self._get_src_version()
        self.test_params = test_params
        self.sc = sc
        return self.sc

    def end_stats(self, sc, total_stats=None, stats_spec=None):
        if sc is None:
            return
        if stats_spec is None:
            stats_spec = self.spec_reference
        if total_stats:
            sc.total_stats(total_stats)
        self.log.info("stopping stats collector")
        sc.stop()
        self.log.info("stats collector is stopped")
        sc.export(stats_spec, self.test_params)

    def load(self,
             num_items,
             min_value_size=None,
             kind='binary',
             protocol='binary',
             ratio_sets=1.0,
             ratio_hot_sets=0.0,
             ratio_hot_gets=0.0,
             ratio_expirations=0.0,
             expiration=None,
             prefix="",
             doc_cache=1,
             use_direct=True,
             report=0,
             start_at=-1,
             collect_server_stats=True,
             is_eperf=False,
             hot_shift=0):
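        # Build the mcsoda configuration for the initial load phase.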
        cfg = {
            'max-items': num_items,
            'max-creates': num_items,
            'max-ops-per-sec': self.parami("load_mcsoda_max_ops_sec",
                                           PerfDefaults.mcsoda_max_ops_sec),
            'min-value-size': min_value_size or self.parami("min_value_size",
                                                            1024),
            'ratio-sets': self.paramf("load_ratio_sets", ratio_sets),
            'ratio-misses': self.paramf("load_ratio_misses", 0.0),
            'ratio-creates': self.paramf("load_ratio_creates", 1.0),
            'ratio-deletes': self.paramf("load_ratio_deletes", 0.0),
            'ratio-hot': 0.0,
            'ratio-hot-sets': ratio_hot_sets,
            'ratio-hot-gets': ratio_hot_gets,
            'ratio-expirations': ratio_expirations,
            'expiration': expiration or 0,
            'exit-after-creates': 1,
            'json': int(kind == 'json'),
            'batch': self.parami("batch", PerfDefaults.batch),
            'vbuckets': self.vbucket_count,
            'doc-cache': doc_cache,
            'prefix': prefix,
            'report': report,
            'hot-shift': hot_shift,
            'cluster_name': self.param("cluster_name", "")
        }
        cur = {}
        if start_at >= 0:
            cur['cur-items'] = start_at
            cur['cur-gets'] = start_at
            cur['cur-sets'] = start_at
            cur['cur-ops'] = cur['cur-gets'] + cur['cur-sets']
            cur['cur-creates'] = start_at
            cfg['max-creates'] = start_at + num_items
            cfg['max-items'] = cfg['max-creates']

        cfg_params = cfg.copy()
        cfg_params['test_time'] = time.time()
        cfg_params['test_name'] = self.id()

        # phase: 'load' or 'reload'
        phase = "load"
        if self.parami("hot_load_phase", 0) == 1:
            phase = "reload"

        if is_eperf:
            collect_server_stats = self.parami("prefix", 0) == 0
            client_id = self.parami("prefix", 0)
            sc = self.start_stats(
                "{0}.{1}".format(self.spec_reference,
                                 phase),  # stats spec e.x: testname.load
                test_params=cfg_params,
                client_id=client_id,
                collect_server_stats=collect_server_stats)

        # For Black box, multi node tests
        # always use membase-binary
        if self.is_multi_node:
            protocol = self.mk_protocol(host=self.input.servers[0].ip,
                                        port=self.input.servers[0].port)

        protocol, host_port, user, pswd = \
            self.protocol_parse(protocol, use_direct=use_direct)

        if not user.strip():
            user = self.input.servers[0].rest_username
        if not pswd.strip():
            pswd = self.input.servers[0].rest_password

        self.log.info("mcsoda - %s %s %s %s" %
                      (protocol, host_port, user, pswd))
        self.log.info("mcsoda - cfg: " + str(cfg))
        self.log.info("mcsoda - cur: " + str(cur))

        cur, start_time, end_time = \
            self.mcsoda_run(cfg, cur, protocol, host_port, user, pswd,
                            heartbeat=self.parami("mcsoda_heartbeat", 0),
                            why="load", bucket=self.param("bucket", "default"))
        self.num_items_loaded = num_items
        ops = {
            'tot-sets': cur.get('cur-sets', 0),
            'tot-gets': cur.get('cur-gets', 0),
            'tot-items': cur.get('cur-items', 0),
            'tot-creates': cur.get('cur-creates', 0),
            'tot-misses': cur.get('cur-misses', 0),
            "start-time": start_time,
            "end-time": end_time
        }

        if is_eperf:
            if self.parami("load_wait_until_drained", 1) == 1:
                self.wait_until_drained()
            if self.parami("load_wait_until_repl",
                           PerfDefaults.load_wait_until_repl) == 1:
                self.wait_until_repl()
            self.end_stats(sc, ops, "{0}.{1}".format(self.spec_reference,
                                                     phase))

        return ops, start_time, end_time

    def mcsoda_run(self,
                   cfg,
                   cur,
                   protocol,
                   host_port,
                   user,
                   pswd,
                   stats_collector=None,
                   stores=None,
                   ctl=None,
                   heartbeat=0,
                   why="",
                   bucket="default"):
        return mcsoda.run(cfg,
                          cur,
                          protocol,
                          host_port,
                          user,
                          pswd,
                          stats_collector=stats_collector,
                          stores=stores,
                          ctl=ctl,
                          heartbeat=heartbeat,
                          why=why,
                          bucket=bucket)

    def rebalance_nodes(self, num_nodes):
        """Rebalance cluster(s) if more than 1 node provided"""

        if len(self.input.servers) == 1 or num_nodes == 1:
            print "WARNING: running on single node cluster"
            return
        else:
            print "[perf.setUp] rebalancing nodes: num_nodes = {0}".\
                format(num_nodes)

        if self.input.clusters:
            for cluster in self.input.clusters.values():
                status, _ = RebalanceHelper.rebalance_in(cluster,
                                                         num_nodes - 1,
                                                         do_shuffle=False)
                self.assertTrue(status)
        else:
            status, _ = RebalanceHelper.rebalance_in(self.input.servers,
                                                     num_nodes - 1,
                                                     do_shuffle=False)
            self.assertTrue(status)

    @staticmethod
    def delayed_rebalance_worker(servers,
                                 num_nodes,
                                 delay_seconds,
                                 sc,
                                 max_retries=PerfDefaults.reb_max_retries):
        time.sleep(delay_seconds)
        gmt_now = time.strftime(PerfDefaults.strftime, time.gmtime())
        print "[delayed_rebalance_worker] rebalance started: %s" % gmt_now

        if not sc:
            print "[delayed_rebalance_worker] invalid stats collector"
            return
        status = False
        retries = 0
        while not status and retries <= max_retries:
            start_time = time.time()
            status, nodes = RebalanceHelper.rebalance_in(
                servers, num_nodes - 1, do_check=(not retries))
            end_time = time.time()
            print "[delayed_rebalance_worker] status: {0}, nodes: {1}, retries: {2}"\
                .format(status, nodes, retries)
            if not status:
                retries += 1
                time.sleep(delay_seconds)
        sc.reb_stats(start_time, end_time - start_time)

    def delayed_rebalance(self,
                          num_nodes,
                          delay_seconds=10,
                          max_retries=PerfDefaults.reb_max_retries,
                          sync=False):
        print "delayed_rebalance"
        if sync:
            PerfBase.delayed_rebalance_worker(self.input.servers, num_nodes,
                                              delay_seconds, self.sc,
                                              max_retries)
        else:
            t = threading.Thread(target=PerfBase.delayed_rebalance_worker,
                                 args=(self.input.servers, num_nodes,
                                       delay_seconds, self.sc, max_retries))
            t.daemon = True
            t.start()

    @staticmethod
    def set_auto_compaction(server, parallel_compaction, percent_threshold):
        rest = RestConnection(server)
        rest.set_auto_compaction(
            parallel_compaction,
            dbFragmentThresholdPercentage=percent_threshold,
            viewFragmntThresholdPercentage=percent_threshold)

    @staticmethod
    def delayed_compaction_worker(servers, parallel_compaction,
                                  percent_threshold, delay_seconds):
        time.sleep(delay_seconds)
        PerfBase.set_auto_compaction(servers[0], parallel_compaction,
                                     percent_threshold)

    def delayed_compaction(self,
                           parallel_compaction="false",
                           percent_threshold=0.01,
                           delay_seconds=10):
        t = threading.Thread(target=PerfBase.delayed_compaction_worker,
                             args=(self.input.servers, parallel_compaction,
                                   percent_threshold, delay_seconds))
        t.daemon = True
        t.start()

    def loop(self,
             num_ops=None,
             num_items=None,
             max_items=None,
             max_creates=None,
             min_value_size=None,
             exit_after_creates=0,
             kind='binary',
             protocol='binary',
             clients=1,
             ratio_misses=0.0,
             ratio_sets=0.0,
             ratio_creates=0.0,
             ratio_deletes=0.0,
             ratio_hot=0.2,
             ratio_hot_sets=0.95,
             ratio_hot_gets=0.95,
             ratio_expirations=0.0,
             expiration=None,
             test_name=None,
             prefix="",
             doc_cache=1,
             use_direct=True,
             collect_server_stats=True,
             start_at=-1,
             report=0,
             ctl=None,
             hot_shift=0,
             is_eperf=False,
             ratio_queries=0,
             queries=0,
             ddoc=None):
        num_items = num_items or self.num_items_loaded

        hot_stack_size = \
            self.parami('hot_stack_size', PerfDefaults.hot_stack_size) or \
            (num_items * ratio_hot)

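        # Build the mcsoda configuration for the access (loop) phase.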
        cfg = {
            'max-items': max_items or num_items,
            'max-creates': max_creates or 0,
            'max-ops-per-sec': self.parami("mcsoda_max_ops_sec",
                                           PerfDefaults.mcsoda_max_ops_sec),
            'min-value-size': min_value_size or self.parami("min_value_size",
                                                            1024),
            'exit-after-creates': exit_after_creates,
            'ratio-sets': ratio_sets,
            'ratio-misses': ratio_misses,
            'ratio-creates': ratio_creates,
            'ratio-deletes': ratio_deletes,
            'ratio-hot': ratio_hot,
            'ratio-hot-sets': ratio_hot_sets,
            'ratio-hot-gets': ratio_hot_gets,
            'ratio-expirations': ratio_expirations,
            'ratio-queries': ratio_queries,
            'expiration': expiration or 0,
            'threads': clients,
            'json': int(kind == 'json'),
            'batch': self.parami("batch", PerfDefaults.batch),
            'vbuckets': self.vbucket_count,
            'doc-cache': doc_cache,
            'prefix': prefix,
            'queries': queries,
            'report': report,
            'hot-shift': hot_shift,
            'hot-stack': self.parami("hot_stack", PerfDefaults.hot_stack),
            'hot-stack-size': hot_stack_size,
            'hot-stack-rotate': self.parami("hot_stack_rotate",
                                            PerfDefaults.hot_stack_rotate),
            'cluster_name': self.param("cluster_name", ""),
            'observe': self.param("observe", PerfDefaults.observe),
            'obs-backoff': self.paramf('obs_backoff',
                                       PerfDefaults.obs_backoff),
            'obs-max-backoff': self.paramf('obs_max_backoff',
                                           PerfDefaults.obs_max_backoff),
            'obs-persist-count': self.parami('obs_persist_count',
                                             PerfDefaults.obs_persist_count),
            'obs-repl-count': self.parami('obs_repl_count',
                                          PerfDefaults.obs_repl_count),
            'woq-pattern': self.parami('woq_pattern',
                                       PerfDefaults.woq_pattern),
            'woq-verbose': self.parami('woq_verbose',
                                       PerfDefaults.woq_verbose),
            'cor-pattern': self.parami('cor_pattern',
                                       PerfDefaults.cor_pattern),
            'cor-persist': self.parami('cor_persist',
                                       PerfDefaults.cor_persist),
            'carbon': self.parami('carbon', PerfDefaults.carbon),
            'carbon-server': self.param('carbon_server',
                                        PerfDefaults.carbon_server),
            'carbon-port': self.parami('carbon_port',
                                       PerfDefaults.carbon_port),
            'carbon-timeout': self.parami('carbon_timeout',
                                          PerfDefaults.carbon_timeout),
            'carbon-cache-size': self.parami('carbon_cache_size',
                                             PerfDefaults.carbon_cache_size),
            'time': self.parami('time', 0)
        }

        cfg_params = cfg.copy()
        cfg_params['test_time'] = time.time()
        cfg_params['test_name'] = test_name
        client_id = ''
        stores = None

        if is_eperf:
            client_id = self.parami("prefix", 0)
        sc = None
        if self.parami("collect_stats", 1):
            sc = self.start_stats(self.spec_reference + ".loop",
                                  test_params=cfg_params,
                                  client_id=client_id,
                                  collect_server_stats=collect_server_stats,
                                  ddoc=ddoc)

        self.cur = {'cur-items': num_items}
        if start_at >= 0:
            self.cur['cur-gets'] = start_at
        if num_ops is None:
            num_ops = num_items
        if isinstance(num_ops, int):
            cfg['max-ops'] = num_ops
        else:
            # Here, num_ops looks like a "time to run" tuple of
            # ('seconds', integer_num_of_seconds_to_run)
            cfg['time'] = num_ops[1]

        # For black-box, multi-node tests,
        # always use membase-binary
        if self.is_multi_node:
            protocol = self.mk_protocol(host=self.input.servers[0].ip,
                                        port=self.input.servers[0].port)

        self.log.info("mcsoda - protocol %s" % protocol)
        protocol, host_port, user, pswd = \
            self.protocol_parse(protocol, use_direct=use_direct)

        if not user.strip():
            user = self.input.servers[0].rest_username
        if not pswd.strip():
            pswd = self.input.servers[0].rest_password

        self.log.info("mcsoda - %s %s %s %s" %
                      (protocol, host_port, user, pswd))
        self.log.info("mcsoda - cfg: " + str(cfg))
        self.log.info("mcsoda - cur: " + str(self.cur))

        # For query tests always use StoreCouchbase
        if protocol == "couchbase":
            stores = [StoreCouchbase()]

        self.cur, start_time, end_time = \
            self.mcsoda_run(cfg, self.cur, protocol, host_port, user, pswd,
                            stats_collector=sc, ctl=ctl, stores=stores,
                            heartbeat=self.parami("mcsoda_heartbeat", 0),
                            why="loop", bucket=self.param("bucket", "default"))

        ops = {
            'tot-sets': self.cur.get('cur-sets', 0),
            'tot-gets': self.cur.get('cur-gets', 0),
            'tot-items': self.cur.get('cur-items', 0),
            'tot-creates': self.cur.get('cur-creates', 0),
            'tot-misses': self.cur.get('cur-misses', 0),
            "start-time": start_time,
            "end-time": end_time
        }

        # Wait until there are no active indexing tasks
        if self.parami('wait_for_indexer', 0):
            ClusterOperationHelper.wait_for_completion(self.rest, 'indexer')

        # Wait until there are no active view compaction tasks
        if self.parami('wait_for_compaction', 0):
            ClusterOperationHelper.wait_for_completion(self.rest,
                                                       'view_compaction')

        if self.parami("loop_wait_until_drained",
                       PerfDefaults.loop_wait_until_drained):
            self.wait_until_drained()

        if self.parami("loop_wait_until_repl",
                       PerfDefaults.loop_wait_until_repl):
            self.wait_until_repl()

        if self.parami("collect_stats", 1) and \
                not self.parami("reb_no_fg", PerfDefaults.reb_no_fg):
            self.end_stats(sc, ops, self.spec_reference + ".loop")

        return ops, start_time, end_time

    def wait_until_drained(self):
        print "[perf.drain] draining disk write queue : %s"\
            % time.strftime(PerfDefaults.strftime)

        master = self.input.servers[0]
        bucket = self.param("bucket", "default")

        RebalanceHelper.wait_for_stats_on_all(
            master,
            bucket,
            'ep_queue_size',
            0,
            fn=RebalanceHelper.wait_for_stats_no_timeout)
        RebalanceHelper.wait_for_stats_on_all(
            master,
            bucket,
            'ep_flusher_todo',
            0,
            fn=RebalanceHelper.wait_for_stats_no_timeout)

        print "[perf.drain] disk write queue has been drained: %s"\
            % time.strftime(PerfDefaults.strftime)

        return time.time()

    def wait_until_repl(self):
        print "[perf.repl] waiting for replication: %s"\
            % time.strftime(PerfDefaults.strftime)

        master = self.input.servers[0]
        bucket = self.param("bucket", "default")

        RebalanceHelper.wait_for_stats_on_all(
            master,
            bucket,
            'vb_replica_queue_size',
            0,
            fn=RebalanceHelper.wait_for_stats_no_timeout)

        RebalanceHelper.wait_for_stats_on_all(
            master,
            bucket,
            'ep_tap_replica_queue_itemondisk',
            0,
            fn=RebalanceHelper.wait_for_stats_no_timeout)

        RebalanceHelper.wait_for_stats_on_all(
            master,
            bucket,
            'ep_tap_rebalance_queue_backfillremaining',
            0,
            fn=RebalanceHelper.wait_for_stats_no_timeout)

        RebalanceHelper.wait_for_stats_on_all(
            master,
            bucket,
            'ep_tap_replica_qlen',
            0,
            fn=RebalanceHelper.wait_for_stats_no_timeout)

        print "[perf.repl] replication is done: %s"\
            % time.strftime(PerfDefaults.strftime)

    def warmup(self, collect_stats=True, flush_os_cache=False):
        """
        Restart cluster and wait for it to warm up.
        In the current version, this affects the master node only.
        """
        if not self.input.servers:
            print "[warmup error] empty server list"
            return

        if collect_stats:
            client_id = self.parami("prefix", 0)
            test_params = {
                'test_time': time.time(),
                'test_name': self.id(),
                'json': 0
            }
            sc = self.start_stats(self.spec_reference + ".warmup",
                                  test_params=test_params,
                                  client_id=client_id)

        print "[warmup] preparing to warmup cluster ..."

        server = self.input.servers[0]
        shell = RemoteMachineShellConnection(server)

        start_time = time.time()

        print "[warmup] stopping couchbase ... ({0}, {1})"\
            .format(server.ip, time.strftime(PerfDefaults.strftime))
        shell.stop_couchbase()
        print "[warmup] couchbase stopped ({0}, {1})"\
            .format(server.ip, time.strftime(PerfDefaults.strftime))

        if flush_os_cache:
            print "[warmup] flushing os cache ..."
            shell.flush_os_caches()

        shell.start_couchbase()
        print "[warmup] couchbase restarted ({0}, {1})"\
            .format(server.ip, time.strftime(PerfDefaults.strftime))

        self.wait_until_warmed_up()
        print "[warmup] warmup finished"

        end_time = time.time()
        ops = {
            'tot-sets': 0,
            'tot-gets': 0,
            'tot-items': 0,
            'tot-creates': 0,
            'tot-misses': 0,
            "start-time": start_time,
            "end-time": end_time
        }

        if collect_stats:
            self.end_stats(sc, ops, self.spec_reference + ".warmup")

    def wait_until_warmed_up(self, master=None):
        if not master:
            master = self.input.servers[0]

        bucket = self.param("bucket", "default")

        fn = RebalanceHelper.wait_for_mc_stats_no_timeout
        for bucket in self.buckets:
            RebalanceHelper.wait_for_stats_on_all(master,
                                                  bucket,
                                                  'ep_warmup_thread',
                                                  'complete',
                                                  fn=fn)

    def param(self, name, default_value):
        input = getattr(self, "input", TestInputSingleton.input)
        return input.test_params.get(name, default_value)

    def parami(self, name, default_int):
        return int(self.param(name, default_int))

    def paramf(self, name, default_float):
        return float(self.param(name, default_float))

    def params(self, name, default_str):
        return str(self.param(name, default_str))
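
    # Example (hypothetical test params): if TestInputSingleton.input.test_params
    # were {"mem_quota": "6000", "obs_backoff": "0.2"}, then parami("mem_quota",
    # 3000) returns 6000, paramf("obs_backoff", 0.1) returns 0.2, and
    # param("bucket", "default") falls back to "default".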
Example #20
class MultiNodesUpgradeTests(NewUpgradeBaseTest):
    def setUp(self):
        super(MultiNodesUpgradeTests, self).setUp()
        if self.initial_version.startswith(
                "1.6") or self.initial_version.startswith("1.7"):
            self.product = 'membase-server'
        else:
            self.product = 'couchbase-server'
        self.initial_num_servers = self.input.param('initial_num_servers', 2)

    def tearDown(self):
        super(MultiNodesUpgradeTests, self).tearDown()

    def offline_cluster_upgrade(self):
        self._install(self.servers[:self.initial_num_servers])
        self.operations(multi_nodes=True)
        upgrade_versions = self.input.param('upgrade_version',
                                            '2.0.0-1870-rel')
        upgrade_versions = upgrade_versions.split(";")
        self.log.info("Installation done going to sleep for %s sec",
                      self.sleep_time)
        time.sleep(self.sleep_time)
        for upgrade_version in upgrade_versions:
            for server in self.servers[:self.initial_num_servers]:
                remote = RemoteMachineShellConnection(server)
                remote.stop_server()
                time.sleep(self.sleep_time)
                remote.disconnect()
            for server in self.servers[:self.initial_num_servers]:
                remote = RemoteMachineShellConnection(server)
                self._upgrade(upgrade_version, server, remote)
                time.sleep(self.sleep_time)
                remote.disconnect()
            time.sleep(self.expire_time)
            self.num_servers = self.initial_num_servers
            self.verification(multi_nodes=True)

    def online_upgrade_rebalance_in_out(self):
        self._install(self.servers[:self.initial_num_servers])
        self.operations(multi_nodes=True)
        self.log.info(
            "Installation of old version is done. Wait for %s sec for upgrade"
            % (self.sleep_time))
        time.sleep(self.sleep_time)
        upgrade_version = self.input.param('upgrade_version', '2.0.0-1870-rel')
        self.initial_version = upgrade_version
        self.product = 'couchbase-server'
        self._install(self.servers[self.initial_num_servers:self.num_servers])
        self.log.info(
            "Installation of new version is done. Wait for %s sec for rebalance"
            % (self.sleep_time))
        time.sleep(self.sleep_time)

        servers_in = self.servers[self.initial_num_servers:self.num_servers]
        self.cluster.rebalance(self.servers[:self.initial_num_servers],
                               servers_in, [])
        self.log.info("Rebalance in all 2.0 Nodes")
        time.sleep(self.sleep_time)
        status, content = ClusterHelper.find_orchestrator(self.master)
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
                        format(status, content))
        FIND_MASTER = False
        for new_server in servers_in:
            if content.find(new_server.ip) >= 0:
                FIND_MASTER = True
                self.log.info("2.0 Node %s becomes the master" %
                              (new_server.ip))
        if not FIND_MASTER:
            raise Exception(
                "After rebalancing in 2.0 nodes, a 2.0 node did not become the master")

        servers_out = self.servers[:self.initial_num_servers]
        self.cluster.rebalance(self.servers[:self.num_servers], [],
                               servers_out)
        self.log.info("Rebalance out all old version nodes")
        time.sleep(self.sleep_time)
        self.verify_upgrade_rebalance_in_out()

    def verify_upgrade_rebalance_in_out(self):
        self.master = self.servers[self.initial_num_servers]
        self.rest = RestConnection(self.master)
        self.rest_helper = RestHelper(self.rest)
        for bucket in self.buckets:
            if self.rest_helper.bucket_exists(bucket.name):
                continue
            else:
                raise Exception("bucket:- %s not found" % bucket.name)
        if self.op_types == "bucket":
            bucketinfo = self.rest.get_bucket(bucket.name)
            self.log.info("bucket info :- %s" % bucketinfo)
        if self.op_types == "data":
            self._wait_for_stats_all_buckets(
                self.servers[self.initial_num_servers:self.num_servers])
            self._verify_all_buckets(self.master, 1, self.wait_timeout * 50,
                                     self.max_verify, True, 1)
            self._verify_stats_all_buckets(
                self.servers[self.initial_num_servers:self.num_servers])

    def online_upgrade_swap_rebalance(self):
        self._install(self.servers[:self.initial_num_servers])
        self.operations(multi_nodes=True)
        self.log.info(
            "Installation of old version is done. Wait for %s sec for upgrade"
            % (self.sleep_time))
        time.sleep(self.sleep_time)
        upgrade_version = self.input.param('upgrade_version', '2.0.0-1870-rel')
        self.initial_version = upgrade_version
        self.product = 'couchbase-server'
        self._install(self.servers[self.initial_num_servers:self.num_servers])
        self.log.info(
            "Installation of new version is done. Wait for %s sec for rebalance"
            % (self.sleep_time))
        time.sleep(self.sleep_time)

        self.swap_num_servers = self.input.param('swap_num_servers', 1)
        old_servers = self.servers[:self.initial_num_servers]
        new_servers = []
        for i in range(self.initial_num_servers / self.swap_num_servers):
            servers_in = self.servers[(self.initial_num_servers +
                                       i * self.swap_num_servers):(
                                           self.initial_num_servers +
                                           (i + 1) * self.swap_num_servers)]
            servers_out = self.servers[(
                i * self.swap_num_servers):((i + 1) * self.swap_num_servers)]
            servers = old_servers + new_servers
            self.cluster.rebalance(servers, servers_in, servers_out)
            self.log.info(
                "Swap rebalance: rebalance out %s old version nodes, rebalance in %s 2.0 Nodes"
                % (self.swap_num_servers, self.swap_num_servers))
            time.sleep(self.sleep_time)
            old_servers = self.servers[(
                (i + 1) * self.swap_num_servers):self.initial_num_servers]
            new_servers = new_servers + servers_in
            servers = old_servers + new_servers
            status, content = ClusterHelper.find_orchestrator(servers[0])
            self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
                            format(status, content))
            FIND_MASTER = False
            for new_server in new_servers:
                if content.find(new_server.ip) >= 0:
                    FIND_MASTER = True
                    self.log.info("2.0 Node %s becomes the master" %
                                  (new_server.ip))
            if not FIND_MASTER:
                raise Exception(
                    "After rebalancing in 2.0 nodes, a 2.0 node did not become the master"
                )

        self.verify_upgrade_rebalance_in_out()
 def backup_restore(self):
     try:
         backup_start = self.backups[int(self.backupset.start) - 1]
     except IndexError:
         backup_start = "{0}{1}".format(self.backups[-1], self.backupset.start)
     try:
         backup_end = self.backups[int(self.backupset.end) - 1]
     except IndexError:
         backup_end = "{0}{1}".format(self.backups[-1], self.backupset.end)
     args = (
         "restore --archive {0} --repo {1} {2} http://{3}:{4} --username {5} "
         "--password {6} --start {7} --end {8}".format(
             self.backupset.directory,
             self.backupset.name,
             self.cluster_flag,
             self.backupset.restore_cluster_host.ip,
             self.backupset.restore_cluster_host.port,
             self.backupset.restore_cluster_host_username,
             self.backupset.restore_cluster_host_password,
             backup_start,
             backup_end,
         )
     )
     if self.backupset.exclude_buckets:
         args += " --exclude-buckets {0}".format(self.backupset.exclude_buckets)
     if self.backupset.include_buckets:
         args += " --include-buckets {0}".format(self.backupset.include_buckets)
     if self.backupset.disable_bucket_config:
         args += " --disable-bucket-config {0}".format(self.backupset.disable_bucket_config)
     if self.backupset.disable_views:
         args += " --disable-views {0}".format(self.backupset.disable_views)
     if self.backupset.disable_gsi_indexes:
         args += " --disable-gsi-indexes {0}".format(self.backupset.disable_gsi_indexes)
     if self.backupset.disable_ft_indexes:
         args += " --disable-ft-indexes {0}".format(self.backupset.disable_ft_indexes)
     if self.backupset.disable_data:
         args += " --disable-data {0}".format(self.backupset.disable_data)
     if self.backupset.disable_conf_res_restriction is not None:
         args += " --disable-conf-res-restriction {0}".format(self.backupset.disable_conf_res_restriction)
     if self.backupset.filter_keys:
         args += " --filter_keys {0}".format(self.backupset.filter_keys)
     if self.backupset.filter_values:
         args += " --filter_values {0}".format(self.backupset.filter_values)
     if self.backupset.force_updates:
         args += " --force-updates"
     if self.no_progress_bar:
         args += " --no-progress-bar"
     if not self.skip_buckets:
         rest_conn = RestConnection(self.backupset.restore_cluster_host)
         rest_helper = RestHelper(rest_conn)
         for bucket in self.buckets:
             if not rest_helper.bucket_exists(bucket.name):
                 self.log.info(
                     "Creating bucket {0} in restore host {1}".format(
                         bucket.name, self.backupset.restore_cluster_host.ip
                     )
                 )
                 rest_conn.create_bucket(
                     bucket=bucket.name,
                     ramQuotaMB=512,
                     authType=bucket.authType if bucket.authType else "none",
                     proxyPort=bucket.port,
                     saslPassword=bucket.saslPassword,
                     lww=self.lww_new,
                 )
                 bucket_ready = rest_helper.vbucket_map_ready(bucket.name)
                 if not bucket_ready:
                     self.fail("Bucket %s not created after 120 seconds." % bucket.name)
     remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
     command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, args)
     output, error = remote_client.execute_command(command)
     remote_client.log_command_output(output, error)
     res = output
     res.extend(error)
     error_str = "Error restoring cluster: Transfer failed. Check the logs for more information."
     if error_str in res:
         command = "cat " + self.backupset.directory + "/logs/backup.log | grep '" + error_str + "' -A 10 -B 100"
         output, error = remote_client.execute_command(command)
         remote_client.log_command_output(output, error)
     if "Required Flags:" in res:
         self.fail("Command line failed. Please check test params.")
     return output, error
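
     # Sketch of the command this method assembles (hypothetical placeholder
     # values; actual paths and flags come from the backupset/test params):
     #   {cli_command_location}/cbbackupmgr restore --archive <archive_dir>
     #       --repo <repo_name> <cluster_flag> http://<restore_host>:<port>
     #       --username <user> --password <pwd> --start <backup> --end <backup>
     # plus whichever optional flags (--exclude-buckets, --disable-*, --filter_*,
     # --force-updates, --no-progress-bar) are set on the backupset.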
Example #22
class NewUpgradeBaseTest(BaseTestCase):
    def setUp(self):
        super(NewUpgradeBaseTest, self).setUp()
        self.product = self.input.param('product', 'couchbase-server')
        self.initial_version = self.input.param('initial_version',
                                                '1.8.1-942-rel')
        self.initial_vbuckets = self.input.param('initial_vbuckets', 64)
        self.rest_settings = self.input.membase_settings
        self.rest = RestConnection(self.master)
        self.rest_helper = RestHelper(self.rest)
        self.sleep_time = 10
        self.data_size = self.input.param('data_size', 1024)
        self.op_types = self.input.param('op_types', 'bucket')
        self.item_flag = self.input.param('item_flag', 4042322160)
        self.expire_time = self.input.param('expire_time', 0)

    def tearDown(self):
        super(NewUpgradeBaseTest, self).tearDown()

    def _install(self, servers):
        params = {}
        params['num_nodes'] = len(servers)
        params['product'] = self.product
        params['version'] = self.initial_version
        params['vbuckets'] = [self.initial_vbuckets]
        InstallerJob().parallel_install(servers, params)
        if self.product in ["couchbase", "couchbase-server", "cb"]:
            success = True
            for server in servers:
                success &= RemoteMachineShellConnection(
                    server).is_couchbase_installed()
                if not success:
                    self.log.info("some nodes were not install successfully!")
                    sys.exit(1)

    def operations(self, multi_nodes=False):
        self.quota = self._initialize_nodes(self.cluster, self.servers,
                                            self.disabled_consistent_view)
        self.buckets = []
        gc.collect()
        if self.total_buckets > 0:
            self.bucket_size = self._get_bucket_size(self.quota,
                                                     self.total_buckets)

        if self.default_bucket:
            self.cluster.create_default_bucket(self.master, self.bucket_size,
                                               self.num_replicas)
            self.buckets.append(
                Bucket(name="default",
                       authType="sasl",
                       saslPassword="",
                       num_replicas=self.num_replicas,
                       bucket_size=self.bucket_size))

        self._create_sasl_buckets(self.master, self.sasl_buckets)
        self._create_standard_buckets(self.master, self.standard_buckets)
        if multi_nodes:
            servers_in = [
                self.servers[i + 1]
                for i in range(self.initial_num_servers - 1)
            ]
            self.cluster.rebalance(self.servers[:1], servers_in, [])
        if self.op_types == "data":
            self._load_data_all_buckets("create")
            if multi_nodes:
                self._wait_for_stats_all_buckets(
                    self.servers[:self.initial_num_servers])
            else:
                self._wait_for_stats_all_buckets([self.master])

    def _load_data_all_buckets(self, op_type='create', start=0):
        loaded = False
        count = 0
        gen_load = BlobGenerator('upgrade-',
                                 'upgrade-',
                                 self.data_size,
                                 start=start,
                                 end=self.num_items)
        while not loaded and count < 60:
            try:
                self._load_all_buckets(self.master,
                                       gen_load,
                                       op_type,
                                       self.expire_time,
                                       1,
                                       self.item_flag,
                                       True,
                                       batch_size=20000,
                                       pause_secs=5,
                                       timeout_secs=180)
                loaded = True
            except MemcachedError as error:
                if error.status == 134:
                    loaded = False
                    self.log.error(
                        "Memcached error 134, wait for 5 seconds and then try again"
                    )
                    count += 1
                    time.sleep(self.sleep_time)

    def _get_build(self, server, version, remote, is_amazon=False):
        info = remote.extract_remote_info()
        builds, changes = BuildQuery().get_all_builds()
        self.log.info("finding build %s for machine %s" % (version, server))
        result = re.search('r', version)

        if result is None:
            appropriate_build = BuildQuery().\
                find_membase_release_build('%s-enterprise' % (self.product), info.deliverable_type,
                                           info.architecture_type, version.strip(), is_amazon=is_amazon)
        else:
            appropriate_build = BuildQuery().\
                find_membase_build(builds, '%s-enterprise' % (self.product), info.deliverable_type,
                                   info.architecture_type, version.strip(), is_amazon=is_amazon)

        return appropriate_build

    def _upgrade(self, upgrade_version, server, remote):
        appropriate_build = self._get_build(server, upgrade_version, remote)
        self.assertTrue(appropriate_build.url,
                        msg="unable to find build {0}".format(upgrade_version))
        remote.download_build(appropriate_build)
        remote.membase_upgrade(appropriate_build, save_upgrade_config=False)
        self.rest_helper.is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
        self.rest.init_cluster_port(self.rest_settings.rest_username,
                                    self.rest_settings.rest_password)
        time.sleep(self.sleep_time)

    def verification(self, multi_nodes=False):
        for bucket in self.buckets:
            if self.rest_helper.bucket_exists(bucket.name):
                continue
            else:
                raise Exception("bucket:- %s not found" % bucket.name)
            if self.op_types == "bucket":
                bucketinfo = self.rest.get_bucket(bucket.name)
                self.log.info("bucket info :- %s" % bucketinfo)

        if self.op_types == "data":
            if multi_nodes:
                self._wait_for_stats_all_buckets(
                    self.servers[:self.num_servers])
                self._verify_all_buckets(self.master, 1,
                                         self.wait_timeout * 50,
                                         self.max_verify, True, 1)
                self._verify_stats_all_buckets(self.servers[:self.num_servers])
            else:
                self._wait_for_stats_all_buckets([self.master])
                self._verify_all_buckets(self.master, 1,
                                         self.wait_timeout * 50,
                                         self.max_verify, True, 1)
                self._verify_stats_all_buckets([self.master])
Example #23
class PerfBase(unittest.TestCase):

    """
    specURL = http://hub.internal.couchbase.org/confluence/display/cbit/Black+Box+Performance+Test+Matrix

    """

    # The setUpBaseX() methods allow subclasses to resequence the setUp() and
    # skip cluster configuration.
    def setUpBase0(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.vbucket_count = PerfDefaults.vbuckets
        self.sc = None
        if self.parami("tear_down_on_setup",
                       PerfDefaults.tear_down_on_setup) == 1:
            self.tearDown()  # Tear down in case previous run had unclean death
        master = self.input.servers[0]
        self.set_up_rest(master)

    def setUpBase1(self):
        if self.parami('num_buckets', 1) > 1:
            bucket = 'bucket-0'
        else:
            bucket = self.param('bucket', 'default')
        vBuckets = self.rest.get_vbuckets(bucket)
        self.vbucket_count = len(vBuckets)
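
    # Hypothetical sketch: a subclass that wants to skip cluster configuration
    # could resequence setUp() roughly as
    #     def setUp(self):
    #         self.setUpBase0()
    #         ...          # custom cluster/bucket setup
    #         self.setUpBase1()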

    def setUp(self):
        self.setUpBase0()

        master = self.input.servers[0]

        self.is_multi_node = False
        self.data_path = master.data_path

        # Number of items loaded by load() method.
        # Does not include or count any items that came from set_up_dgm().
        #
        self.num_items_loaded = 0

        if self.input.clusters:
            for cluster in self.input.clusters.values():
                master = cluster[0]
                self.set_up_rest(master)
                self.set_up_cluster(master)
        else:
            master = self.input.servers[0]
            self.set_up_cluster(master)

        # Rebalance
        num_nodes = self.parami("num_nodes", 10)
        self.rebalance_nodes(num_nodes)

        if self.input.clusters:
            for cluster in self.input.clusters.values():
                master = cluster[0]
                self.set_up_rest(master)
                self.set_up_buckets()
        else:
            self.set_up_buckets()

        self.set_up_proxy()

        if self.input.clusters:
            for cluster in self.input.clusters.values():
                master = cluster[0]
                self.set_up_rest(master)
                self.reconfigure()
        else:
            self.reconfigure()

        if self.parami("dgm", getattr(self, "dgm", 1)):
            self.set_up_dgm()

        time.sleep(10)
        self.setUpBase1()

        if self.input.clusters:
            for cluster in self.input.clusters.values():
                self.wait_until_warmed_up(cluster[0])
        else:
            self.wait_until_warmed_up()
        ClusterOperationHelper.flush_os_caches(self.input.servers)

    def set_up_rest(self, master):
        self.rest = RestConnection(master)
        self.rest_helper = RestHelper(self.rest)

    def set_up_cluster(self, master):
        """Initialize cluster"""

        print "[perf.setUp] Setting up cluster"

        self.rest.init_cluster(master.rest_username, master.rest_password)

        memory_quota = self.parami('mem_quota', PerfDefaults.mem_quota)
        self.rest.init_cluster_memoryQuota(master.rest_username,
                                           master.rest_password,
                                           memoryQuota=memory_quota)

    def set_up_buckets(self):
        """Set up data bucket(s)"""

        print "[perf.setUp] Setting up buckets"

        num_buckets = self.parami('num_buckets', 1)
        if num_buckets > 1:
            self.buckets = ['bucket-{0}'.format(i) for i in range(num_buckets)]
        else:
            self.buckets = [self.param('bucket', 'default')]

        for bucket in self.buckets:
            bucket_ram_quota = self.parami('mem_quota', PerfDefaults.mem_quota)
            bucket_ram_quota = bucket_ram_quota / num_buckets
            replicas = self.parami('replicas', getattr(self, 'replicas', 1))

            self.rest.create_bucket(bucket=bucket, ramQuotaMB=bucket_ram_quota,
                                    replicaNumber=replicas, authType='sasl')

            status = self.rest_helper.vbucket_map_ready(bucket, 60)
            self.assertTrue(status, msg='vbucket_map not ready .. timed out')
            status = self.rest_helper.bucket_exists(bucket)
            self.assertTrue(status,
                            msg='unable to create {0} bucket'.format(bucket))

    def reconfigure(self):
        """Customize basic Couchbase setup"""

        print "[perf.setUp] Customizing setup"

        self.set_loglevel()
        self.set_max_concurrent_reps_per_doc()
        self.set_autocompaction()

    def set_loglevel(self):
        """Set custom loglevel"""

        loglevel = self.param('loglevel', None)
        if loglevel:
            self.rest.set_global_loglevel(loglevel)

    def set_max_concurrent_reps_per_doc(self):
        """Set custom MAX_CONCURRENT_REPS_PER_DOC"""

        max_concurrent_reps_per_doc = self.param('max_concurrent_reps_per_doc',
                                                 None)
        if max_concurrent_reps_per_doc:
            for server in self.input.servers:
                rc = RemoteMachineShellConnection(server)
                rc.set_environment_variable('MAX_CONCURRENT_REPS_PER_DOC',
                                            max_concurrent_reps_per_doc)

    def set_ep_compaction(self, comp_ratio):
        """Set up ep_engine side compaction ratio"""
        for server in self.input.servers:
            shell = RemoteMachineShellConnection(server)
            cmd = "/opt/couchbase/bin/cbepctl localhost:11210 "\
                  "set flush_param db_frag_threshold {0}".format(comp_ratio)
            self._exec_and_log(shell, cmd)
            shell.disconnect()

    def set_autocompaction(self, disable_view_compaction=False):
        """Set custom auto-compaction settings"""

        try:
            # Parallel database and view compaction
            parallel_compaction = self.param("parallel_compaction",
                                             PerfDefaults.parallel_compaction)
            # Database fragmentation threshold
            db_compaction = self.parami("db_compaction",
                                        PerfDefaults.db_compaction)
            print "[perf.setUp] database compaction = %d" % db_compaction

            # ep_engine fragementation threshold
            ep_compaction = self.parami("ep_compaction",
                                        PerfDefaults.ep_compaction)
            self.set_ep_compaction(ep_compaction)
            print "[perf.setUp] ep_engine compaction = %d" % ep_compaction

            # View fragmentation threshold
            if disable_view_compaction:
                view_compaction = 100
            else:
                view_compaction = self.parami("view_compaction",
                                              PerfDefaults.view_compaction)
            # Set custom auto-compaction settings
            self.rest.set_auto_compaction(parallelDBAndVC=parallel_compaction,
                                          dbFragmentThresholdPercentage=db_compaction,
                                          viewFragmntThresholdPercentage=view_compaction)
        except Exception as e:
            # It's very hard to determine what exceptions this can raise,
            # so we have to use a general handler.
            print "ERROR while changing compaction settings: {0}".format(e)

    def tearDown(self):
        if self.parami("tear_down", 0) == 1:
            print "[perf.tearDown] tearDown routine skipped"
            return

        print "[perf.tearDown] tearDown routine starts"

        if self.parami("tear_down_proxy", 1) == 1:
            self.tear_down_proxy()
        else:
            print "[perf.tearDown] Proxy tearDown skipped"

        if self.sc is not None:
            self.sc.stop()
            self.sc = None

        if self.parami("tear_down_bucket", 0) == 1:
            self.tear_down_buckets()
        else:
            print "[perf.tearDown] Bucket tearDown skipped"

        if self.parami("tear_down_cluster", 1) == 1:
            self.tear_down_cluster()
        else:
            print "[perf.tearDown] Cluster tearDown skipped"

        print "[perf.tearDown] tearDown routine finished"

    def tear_down_buckets(self):
        print "[perf.tearDown] Tearing down bucket"
        BucketOperationHelper.delete_all_buckets_or_assert(self.input.servers,
                                                           self)
        print "[perf.tearDown] Bucket teared down"

    def tear_down_cluster(self):
        print "[perf.tearDown] Tearing down cluster"
        ClusterOperationHelper.cleanup_cluster(self.input.servers)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.input.servers,
                                                             self)
        print "[perf.tearDown] Cluster teared down"

    def set_up_proxy(self, bucket=None):
        """Set up and start Moxi"""

        if self.input.moxis:
            print '[perf.setUp] Setting up proxy'

            bucket = bucket or self.param('bucket', 'default')

            shell = RemoteMachineShellConnection(self.input.moxis[0])
            shell.start_moxi(self.input.servers[0].ip, bucket,
                             self.input.moxis[0].port)
            shell.disconnect()

    def tear_down_proxy(self):
        if len(self.input.moxis) > 0:
            shell = RemoteMachineShellConnection(self.input.moxis[0])
            shell.stop_moxi()
            shell.disconnect()

    # Returns "host:port" of moxi to hit.
    def target_host_port(self, bucket='default', use_direct=False):
        rv = self.param('moxi', None)
        if use_direct:
            return "%s:%s" % (self.input.servers[0].ip,
                              '11210')
        if rv:
            return rv
        if len(self.input.moxis) > 0:
            return "%s:%s" % (self.input.moxis[0].ip,
                              self.input.moxis[0].port)
        return "%s:%s" % (self.input.servers[0].ip,
                          self.rest.get_bucket(bucket).nodes[0].moxi)

    def protocol_parse(self, protocol_in, use_direct=False):
        if protocol_in.find('://') >= 0:
            if protocol_in.find("couchbase:") >= 0:
                protocol = "couchbase"
            else:
                protocol = \
                    '-'.join(((["membase"] +
                    protocol_in.split("://"))[-2] + "-binary").split('-')[0:2])
            host_port = ('@' + protocol_in.split("://")[-1]).split('@')[-1]
            user, pswd = (('@' +
                           protocol_in.split("://")[-1]).split('@')[-2] +
                           ":").split(':')[0:2]
        else:
            protocol = 'memcached-' + protocol_in
            host_port = self.target_host_port(use_direct=use_direct)
            user = self.param("rest_username", "Administrator")
            pswd = self.param("rest_password", "password")
        return protocol, host_port, user, pswd
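
    # protocol_parse() example (hypothetical values):
    #   "membase-binary://Administrator:password@10.1.2.3:8091"
    #       -> ("membase-binary", "10.1.2.3:8091", "Administrator", "password")
    #   "binary" (no "://") -> ("memcached-binary", target_host_port(), and the
    #       rest_username/rest_password test params as user/password)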

    def mk_protocol(self, host, port='8091', prefix='membase-binary'):
        return self.param('protocol',
                          prefix + '://' + host + ':' + port)

    def restartProxy(self, bucket=None):
        self.tear_down_proxy()
        self.set_up_proxy(bucket)

    def set_up_dgm(self):
        """Download fragmented, DGM dataset onto each cluster node, if not
        already locally available.

        The number of vbuckets and database schema must match the
        target cluster.

        Shutdown all cluster nodes.

        Do a cluster-restore.

        Restart all cluster nodes."""

        bucket = self.param("bucket", "default")
        ClusterOperationHelper.stop_cluster(self.input.servers)
        for server in self.input.servers:
            remote = RemoteMachineShellConnection(server)
            #TODO: Better way to pass num_nodes and db_size?
            self.get_data_files(remote, bucket, 1, 10)
            remote.disconnect()
        ClusterOperationHelper.start_cluster(self.input.servers)

    def get_data_files(self, remote, bucket, num_nodes, db_size):
        base = 'https://s3.amazonaws.com/database-analysis'
        dir = '/tmp/'
        if remote.is_couchbase_installed():
            dir = dir + '/couchbase/{0}-{1}-{2}/'.format(num_nodes, 256,
                                                         db_size)
            output, error = remote.execute_command('mkdir -p {0}'.format(dir))
            remote.log_command_output(output, error)
            file = '{0}_cb.tar.gz'.format(bucket)
            base_url = base + '/couchbase/{0}-{1}-{2}/{3}'.format(num_nodes,
                                                                  256, db_size,
                                                                  file)
        else:
            dir = dir + '/membase/{0}-{1}-{2}/'.format(num_nodes, 1024,
                                                       db_size)
            output, error = remote.execute_command('mkdir -p {0}'.format(dir))
            remote.log_command_output(output, error)
            file = '{0}_mb.tar.gz'.format(bucket)
            base_url = base + '/membase/{0}-{1}-{2}/{3}'.format(num_nodes,
                                                                1024, db_size,
                                                                file)


        info = remote.extract_remote_info()
        wget_command = 'wget'
        if info.type.lower() == 'windows':
            wget_command = \
                "cd {0} ;cmd /c 'c:\\automation\\wget.exe --no-check-certificate"\
                .format(dir)

        # Check if the file already exists on the remote server;
        # otherwise download the gzipped version and extract it if necessary
        exist = remote.file_exists(dir, file)
        if not exist:
            additional_quote = ""
            if info.type.lower() == 'windows':
                additional_quote = "'"
            command = "{0} -v -O {1}{2} {3} {4} ".format(wget_command, dir,
                                                         file, base_url,
                                                         additional_quote)
            output, error = remote.execute_command(command)
            remote.log_command_output(output, error)

        if remote.is_couchbase_installed():
            if info.type.lower() == 'windows':
                destination_folder = testconstants.WIN_COUCHBASE_DATA_PATH
            else:
                destination_folder = testconstants.COUCHBASE_DATA_PATH
        else:
            if info.type.lower() == 'windows':
                destination_folder = testconstants.WIN_MEMBASE_DATA_PATH
            else:
                destination_folder = testconstants.MEMBASE_DATA_PATH
        if self.data_path:
            destination_folder = self.data_path
        untar_command = 'cd {1}; tar -xzf {0}'.format(dir + file,
                                                      destination_folder)
        output, error = remote.execute_command(untar_command)
        remote.log_command_output(output, error)

    def _exec_and_log(self, shell, cmd):
        """helper method to execute a command and log output"""
        if not cmd or not shell:
            return

        output, error = shell.execute_command(cmd)
        shell.log_command_output(output, error)

    def _build_tar_name(self, bucket, version="unknown_version",
                        file_base=None):
        """build tar file name.

        {file_base}-{version}-{bucket}.tar.gz
        """
        if not file_base:
            file_base = os.path.splitext(
                os.path.basename(self.param("conf_file",
                                 PerfDefaults.conf_file)))[0]
        return "{0}-{1}-{2}.tar.gz".format(file_base, version, bucket)

    def _save_snapshot(self, server, bucket, file_base=None):
        """Save data files to a snapshot"""

        src_data_path = os.path.dirname(server.data_path or
                                        testconstants.COUCHBASE_DATA_PATH)
        dest_data_path = "{0}-snapshots".format(src_data_path)

        print "[perf: _save_snapshot] server = {0} , src_data_path = {1}, dest_data_path = {2}"\
            .format(server.ip, src_data_path, dest_data_path)

        shell = RemoteMachineShellConnection(server)

        build_name, short_version, full_version = \
            shell.find_build_version("/opt/couchbase/", "VERSION.txt", "cb")

        dest_file = self._build_tar_name(bucket, full_version, file_base)

        self._exec_and_log(shell, "mkdir -p {0}".format(dest_data_path))

        # save as gzip file; if the file exists, overwrite it
        # TODO: multiple buckets
        zip_cmd = "cd {0}; tar -cvzf {1}/{2} {3} {3}-data _*"\
            .format(src_data_path, dest_data_path, dest_file, bucket)
        self._exec_and_log(shell, zip_cmd)

        shell.disconnect()
        return True

    def _load_snapshot(self, server, bucket, file_base=None, overwrite=True):
        """Load data files from a snapshot"""

        dest_data_path = os.path.dirname(server.data_path or
                                         testconstants.COUCHBASE_DATA_PATH)
        src_data_path = "{0}-snapshots".format(dest_data_path)

        print "[perf: _load_snapshot] server = {0} , src_data_path = {1}, dest_data_path = {2}"\
            .format(server.ip, src_data_path, dest_data_path)

        shell = RemoteMachineShellConnection(server)

        build_name, short_version, full_version = \
            shell.find_build_version("/opt/couchbase/", "VERSION.txt", "cb")

        src_file = self._build_tar_name(bucket, full_version, file_base)

        if not shell.file_exists(src_data_path, src_file):
            print "[perf: _load_snapshot] file '{0}/{1}' does not exist"\
                .format(src_data_path, src_file)
            shell.disconnect()
            return False

        if not overwrite:
            self._save_snapshot(server, bucket,
                                "{0}.tar.gz".format(
                                    time.strftime(PerfDefaults.strftime)))  # TODO: filename

        rm_cmd = "rm -rf {0}/{1} {0}/{1}-data {0}/_*".format(dest_data_path,
                                                             bucket)
        self._exec_and_log(shell, rm_cmd)

        unzip_cmd = "cd {0}; tar -xvzf {1}/{2}".format(dest_data_path,
                                                       src_data_path, src_file)
        self._exec_and_log(shell, unzip_cmd)

        shell.disconnect()
        return True

    def save_snapshots(self, file_base, bucket):
        """Save snapshots on all servers"""
        if not self.input.servers or not bucket:
            print "[perf: save_snapshot] invalid server list or bucket name"
            return False

        ClusterOperationHelper.stop_cluster(self.input.servers)

        for server in self.input.servers:
            self._save_snapshot(server, bucket, file_base)

        ClusterOperationHelper.start_cluster(self.input.servers)

        return True

    def load_snapshots(self, file_base, bucket):
        """Load snapshots on all servers"""
        if not self.input.servers or not bucket:
            print "[perf: load_snapshot] invalid server list or bucket name"
            return False

        ClusterOperationHelper.stop_cluster(self.input.servers)

        for server in self.input.servers:
            if not self._load_snapshot(server, bucket, file_base):
                ClusterOperationHelper.start_cluster(self.input.servers)
                return False

        ClusterOperationHelper.start_cluster(self.input.servers)

        return True

    def spec(self, reference):
        self.spec_reference = self.param("spec", reference)
        self.log.info("spec: " + reference)

    def mk_stats(self, verbosity):
        return StatsCollector(verbosity)

    def _get_src_version(self):
        """get testrunner version"""
        try:
            result = subprocess.Popen(['git', 'rev-parse', 'HEAD'],
                                      stdout=subprocess.PIPE).communicate()[0]
        except (OSError, subprocess.CalledProcessError) as e:  # Popen raises OSError if git is unavailable
            print "[perf] unable to get src code version : {0}".format(str(e))
            return "unknown version"
        return result.rstrip()[:7]

    def start_stats(self, stats_spec, servers=None,
                    process_names=['memcached', 'beam.smp', 'couchjs'],
                    test_params=None, client_id='',
                    collect_server_stats=True, ddoc=None):
        if self.parami('stats', 1) == 0:
            return None

        servers = servers or self.input.servers
        sc = self.mk_stats(False)
        bucket = self.param("bucket", "default")
        sc.start(servers, bucket, process_names, stats_spec, 10, client_id,
                 collect_server_stats=collect_server_stats, ddoc=ddoc)
        test_params['testrunner'] = self._get_src_version()
        self.test_params = test_params
        self.sc = sc
        return self.sc

    def end_stats(self, sc, total_stats=None, stats_spec=None):
        if sc is None:
            return
        if stats_spec is None:
            stats_spec = self.spec_reference
        if total_stats:
            sc.total_stats(total_stats)
        self.log.info("stopping stats collector")
        sc.stop()
        self.log.info("stats collector is stopped")
        sc.export(stats_spec, self.test_params)

    def load(self, num_items, min_value_size=None,
             kind='binary',
             protocol='binary',
             ratio_sets=1.0,
             ratio_hot_sets=0.0,
             ratio_hot_gets=0.0,
             ratio_expirations=0.0,
             expiration=None,
             prefix="",
             doc_cache=1,
             use_direct=True,
             report=0,
             start_at=-1,
             collect_server_stats=True,
             is_eperf=False,
             hot_shift=0):
        cfg = {'max-items': num_items,
               'max-creates': num_items,
               'max-ops-per-sec': self.parami("load_mcsoda_max_ops_sec",
                                              PerfDefaults.mcsoda_max_ops_sec),
               'min-value-size': min_value_size or self.parami("min_value_size",
                                                               1024),
               'ratio-sets': self.paramf("load_ratio_sets", ratio_sets),
               'ratio-misses': self.paramf("load_ratio_misses", 0.0),
               'ratio-creates': self.paramf("load_ratio_creates", 1.0),
               'ratio-deletes': self.paramf("load_ratio_deletes", 0.0),
               'ratio-hot': 0.0,
               'ratio-hot-sets': ratio_hot_sets,
               'ratio-hot-gets': ratio_hot_gets,
               'ratio-expirations': ratio_expirations,
               'expiration': expiration or 0,
               'exit-after-creates': 1,
               'json': int(kind == 'json'),
               'batch': self.parami("batch", PerfDefaults.batch),
               'vbuckets': self.vbucket_count,
               'doc-cache': doc_cache,
               'prefix': prefix,
               'report': report,
               'hot-shift': hot_shift,
               'cluster_name': self.param("cluster_name", "")}
        cur = {}
        if start_at >= 0:
            cur['cur-items'] = start_at
            cur['cur-gets'] = start_at
            cur['cur-sets'] = start_at
            cur['cur-ops'] = cur['cur-gets'] + cur['cur-sets']
            cur['cur-creates'] = start_at
            cfg['max-creates'] = start_at + num_items
            cfg['max-items'] = cfg['max-creates']

        cfg_params = cfg.copy()
        cfg_params['test_time'] = time.time()
        cfg_params['test_name'] = self.id()

        # phase: 'load' or 'reload'
        phase = "load"
        if self.parami("hot_load_phase", 0) == 1:
            phase = "reload"

        if is_eperf:
            collect_server_stats = self.parami("prefix", 0) == 0
            client_id = self.parami("prefix", 0)
            sc = self.start_stats("{0}.{1}".format(self.spec_reference, phase), # stats spec e.x: testname.load
                                  test_params=cfg_params, client_id=client_id,
                                  collect_server_stats=collect_server_stats)

        # For black-box, multi-node tests,
        # always use membase-binary
        if self.is_multi_node:
            protocol = self.mk_protocol(host=self.input.servers[0].ip,
                                        port=self.input.servers[0].port)

        protocol, host_port, user, pswd = \
            self.protocol_parse(protocol, use_direct=use_direct)

        if not user.strip():
            user = self.input.servers[0].rest_username
        if not pswd.strip():
            pswd = self.input.servers[0].rest_password

        self.log.info("mcsoda - %s %s %s %s" %
                      (protocol, host_port, user, pswd))
        self.log.info("mcsoda - cfg: " + str(cfg))
        self.log.info("mcsoda - cur: " + str(cur))

        cur, start_time, end_time = \
            self.mcsoda_run(cfg, cur, protocol, host_port, user, pswd,
                            heartbeat=self.parami("mcsoda_heartbeat", 0),
                            why="load", bucket=self.param("bucket", "default"))
        self.num_items_loaded = num_items
        ops = {'tot-sets': cur.get('cur-sets', 0),
               'tot-gets': cur.get('cur-gets', 0),
               'tot-items': cur.get('cur-items', 0),
               'tot-creates': cur.get('cur-creates', 0),
               'tot-misses': cur.get('cur-misses', 0),
               "start-time": start_time,
               "end-time": end_time}

        if is_eperf:
            if self.parami("load_wait_until_drained", 1) == 1:
                self.wait_until_drained()
            if self.parami("load_wait_until_repl",
                PerfDefaults.load_wait_until_repl) == 1:
                self.wait_until_repl()
            self.end_stats(sc, ops, "{0}.{1}".format(self.spec_reference,
                                                     phase))

        return ops, start_time, end_time

    def mcsoda_run(self, cfg, cur, protocol, host_port, user, pswd,
                   stats_collector=None, stores=None, ctl=None,
                   heartbeat=0, why="", bucket="default"):
        return mcsoda.run(cfg, cur, protocol, host_port, user, pswd,
                          stats_collector=stats_collector,
                          stores=stores,
                          ctl=ctl,
                          heartbeat=heartbeat,
                          why=why,
                          bucket=bucket)
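
    # mcsoda.run() is expected to return the updated counter dict plus
    # (start_time, end_time); load() and loop() unpack these to build their
    # "tot-*" ops summaries.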

    def rebalance_nodes(self, num_nodes):
        """Rebalance cluster(s) if more than 1 node provided"""

        if len(self.input.servers) == 1 or num_nodes == 1:
            print "WARNING: running on single node cluster"
            return
        else:
            print "[perf.setUp] rebalancing nodes: num_nodes = {0}".\
                format(num_nodes)

        if self.input.clusters:
            for cluster in self.input.clusters.values():
                status, _ = RebalanceHelper.rebalance_in(cluster,
                                                         num_nodes - 1,
                                                         do_shuffle=False)
                self.assertTrue(status)
        else:
            status, _ = RebalanceHelper.rebalance_in(self.input.servers,
                                                     num_nodes - 1,
                                                     do_shuffle=False)
            self.assertTrue(status)

    @staticmethod
    def delayed_rebalance_worker(servers, num_nodes, delay_seconds, sc,
                                 max_retries=PerfDefaults.reb_max_retries):
        time.sleep(delay_seconds)
        gmt_now = time.strftime(PerfDefaults.strftime, time.gmtime())
        print "[delayed_rebalance_worker] rebalance started: %s" % gmt_now

        if not sc:
            print "[delayed_rebalance_worker] invalid stats collector"
            return
        status = False
        retries = 0
        while not status and retries <= max_retries:
            start_time = time.time()
            status, nodes = RebalanceHelper.rebalance_in(servers,
                                                         num_nodes - 1,
                                                         do_check=(not retries))
            end_time = time.time()
            print "[delayed_rebalance_worker] status: {0}, nodes: {1}, retries: {2}"\
                .format(status, nodes, retries)
            if not status:
                retries += 1
                time.sleep(delay_seconds)
        sc.reb_stats(start_time, end_time - start_time)

    def delayed_rebalance(self, num_nodes, delay_seconds=10,
                          max_retries=PerfDefaults.reb_max_retries,
                          sync=False):
        print "delayed_rebalance"
        if sync:
            PerfBase.delayed_rebalance_worker(self.input.servers,
                    num_nodes, delay_seconds, self.sc, max_retries)
        else:
            t = threading.Thread(target=PerfBase.delayed_rebalance_worker,
                                 args=(self.input.servers, num_nodes,
                                 delay_seconds, self.sc, max_retries))
            t.daemon = True
            t.start()

    @staticmethod
    def set_auto_compaction(server, parallel_compaction, percent_threshold):
        rest = RestConnection(server)
        rest.set_auto_compaction(parallel_compaction,
                                 dbFragmentThresholdPercentage=percent_threshold,
                                 viewFragmntThresholdPercentage=percent_threshold)

    @staticmethod
    def delayed_compaction_worker(servers, parallel_compaction,
                                  percent_threshold, delay_seconds):
        time.sleep(delay_seconds)
        PerfBase.set_auto_compaction(servers[0], parallel_compaction,
                                     percent_threshold)

    def delayed_compaction(self, parallel_compaction="false",
                           percent_threshold=0.01,
                           delay_seconds=10):
        t = threading.Thread(target=PerfBase.delayed_compaction_worker,
                             args=(self.input.servers,
                                   parallel_compaction,
                                   percent_threshold,
                                   delay_seconds))
        t.daemon = True
        t.start()

    def loop(self, num_ops=None,
             num_items=None,
             max_items=None,
             max_creates=None,
             min_value_size=None,
             exit_after_creates=0,
             kind='binary',
             protocol='binary',
             clients=1,
             ratio_misses=0.0,
             ratio_sets=0.0, ratio_creates=0.0, ratio_deletes=0.0,
             ratio_hot=0.2, ratio_hot_sets=0.95, ratio_hot_gets=0.95,
             ratio_expirations=0.0,
             expiration=None,
             test_name=None,
             prefix="",
             doc_cache=1,
             use_direct=True,
             collect_server_stats=True,
             start_at=-1,
             report=0,
             ctl=None,
             hot_shift=0,
             is_eperf=False,
             ratio_queries=0,
             queries=0,
             ddoc=None):
        num_items = num_items or self.num_items_loaded

        hot_stack_size = \
            self.parami('hot_stack_size', PerfDefaults.hot_stack_size) or \
            (num_items * ratio_hot)

        cfg = {'max-items': max_items or num_items,
               'max-creates': max_creates or 0,
               'max-ops-per-sec': self.parami("mcsoda_max_ops_sec",
                                              PerfDefaults.mcsoda_max_ops_sec),
               'min-value-size': min_value_size or self.parami("min_value_size",
                                                               1024),
               'exit-after-creates': exit_after_creates,
               'ratio-sets': ratio_sets,
               'ratio-misses': ratio_misses,
               'ratio-creates': ratio_creates,
               'ratio-deletes': ratio_deletes,
               'ratio-hot': ratio_hot,
               'ratio-hot-sets': ratio_hot_sets,
               'ratio-hot-gets': ratio_hot_gets,
               'ratio-expirations': ratio_expirations,
               'ratio-queries': ratio_queries,
               'expiration': expiration or 0,
               'threads': clients,
               'json': int(kind == 'json'),
               'batch': self.parami("batch", PerfDefaults.batch),
               'vbuckets': self.vbucket_count,
               'doc-cache': doc_cache,
               'prefix': prefix,
               'queries': queries,
               'report': report,
               'hot-shift': hot_shift,
               'hot-stack': self.parami("hot_stack", PerfDefaults.hot_stack),
               'hot-stack-size': hot_stack_size,
               'hot-stack-rotate': self.parami("hot_stack_rotate",
                                               PerfDefaults.hot_stack_rotate),
               'cluster_name': self.param("cluster_name", ""),
               'observe': self.param("observe", PerfDefaults.observe),
               'obs-backoff': self.paramf('obs_backoff',
                                          PerfDefaults.obs_backoff),
               'obs-max-backoff': self.paramf('obs_max_backoff',
                                              PerfDefaults.obs_max_backoff),
               'obs-persist-count': self.parami('obs_persist_count',
                                                PerfDefaults.obs_persist_count),
               'obs-repl-count': self.parami('obs_repl_count',
                                             PerfDefaults.obs_repl_count),
               'woq-pattern': self.parami('woq_pattern',
                                         PerfDefaults.woq_pattern),
               'woq-verbose': self.parami('woq_verbose',
                                         PerfDefaults.woq_verbose),
               'cor-pattern': self.parami('cor_pattern',
                                         PerfDefaults.cor_pattern),
               'cor-persist': self.parami('cor_persist',
                                         PerfDefaults.cor_persist),
               'carbon': self.parami('carbon', PerfDefaults.carbon),
               'carbon-server': self.param('carbon_server',
                                           PerfDefaults.carbon_server),
               'carbon-port': self.parami('carbon_port',
                                          PerfDefaults.carbon_port),
               'carbon-timeout': self.parami('carbon_timeout',
                                             PerfDefaults.carbon_timeout),
               'carbon-cache-size': self.parami('carbon_cache_size',
                                                PerfDefaults.carbon_cache_size),
               'time': self.parami('time', 0)}

        cfg_params = cfg.copy()
        cfg_params['test_time'] = time.time()
        cfg_params['test_name'] = test_name
        client_id = ''
        stores = None

        if is_eperf:
            client_id = self.parami("prefix", 0)
        sc = None
        if self.parami("collect_stats", 1):
            sc = self.start_stats(self.spec_reference + ".loop",
                                  test_params=cfg_params, client_id=client_id,
                                  collect_server_stats=collect_server_stats,
                                  ddoc=ddoc)

        self.cur = {'cur-items': num_items}
        if start_at >= 0:
            self.cur['cur-gets'] = start_at
        if num_ops is None:
            num_ops = num_items
        if isinstance(num_ops, int):
            cfg['max-ops'] = num_ops
        else:
            # Here, num_ops is a "time to run" tuple of the form
            # ('seconds', integer_num_of_seconds_to_run); see the usage
            # sketch after this method.
            cfg['time'] = num_ops[1]

        # For Black box, multi node tests
        # always use membase-binary
        if self.is_multi_node:
            protocol = self.mk_protocol(host=self.input.servers[0].ip,
                                        port=self.input.servers[0].port)

        self.log.info("mcsoda - protocol %s" % protocol)
        protocol, host_port, user, pswd = \
            self.protocol_parse(protocol, use_direct=use_direct)

        if not user.strip():
            user = self.input.servers[0].rest_username
        if not pswd.strip():
            pswd = self.input.servers[0].rest_password

        self.log.info("mcsoda - %s %s %s %s" %
                      (protocol, host_port, user, pswd))
        self.log.info("mcsoda - cfg: " + str(cfg))
        self.log.info("mcsoda - cur: " + str(self.cur))

        # For query tests always use StoreCouchbase
        if protocol == "couchbase":
            stores = [StoreCouchbase()]

        self.cur, start_time, end_time = \
            self.mcsoda_run(cfg, self.cur, protocol, host_port, user, pswd,
                            stats_collector=sc, ctl=ctl, stores=stores,
                            heartbeat=self.parami("mcsoda_heartbeat", 0),
                            why="loop", bucket=self.param("bucket", "default"))

        ops = {'tot-sets': self.cur.get('cur-sets', 0),
               'tot-gets': self.cur.get('cur-gets', 0),
               'tot-items': self.cur.get('cur-items', 0),
               'tot-creates': self.cur.get('cur-creates', 0),
               'tot-misses': self.cur.get('cur-misses', 0),
               "start-time": start_time,
               "end-time": end_time}

        # Wait until there are no active indexing tasks
        if self.parami('wait_for_indexer', 0):
            ClusterOperationHelper.wait_for_completion(self.rest, 'indexer')

        # Wait until there are no active view compaction tasks
        if self.parami('wait_for_compaction', 0):
            ClusterOperationHelper.wait_for_completion(self.rest,
                                                       'view_compaction')

        if self.parami("loop_wait_until_drained",
                       PerfDefaults.loop_wait_until_drained):
            self.wait_until_drained()

        if self.parami("loop_wait_until_repl",
                       PerfDefaults.loop_wait_until_repl):
            self.wait_until_repl()

        if self.parami("collect_stats", 1) and \
                not self.parami("reb_no_fg", PerfDefaults.reb_no_fg):
            self.end_stats(sc, ops, self.spec_reference + ".loop")

        return ops, start_time, end_time
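
    # Hedged usage sketch (not part of the original source): loop() accepts
    # either an operation count or a "time to run" tuple for num_ops, as noted
    # in the comment above. The argument values below are illustrative only.
    #
    #   # Run exactly 100,000 mixed operations:
    #   ops, start, end = self.loop(num_ops=100000, num_items=100000,
    #                               ratio_sets=0.1, kind='json', clients=4)
    #
    #   # Run for 30 minutes instead of a fixed op count:
    #   ops, start, end = self.loop(num_ops=('seconds', 1800), num_items=100000,
    #                               ratio_sets=0.1, kind='json', clients=4)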

    def wait_until_drained(self):
        print "[perf.drain] draining disk write queue : %s"\
            % time.strftime(PerfDefaults.strftime)

        master = self.input.servers[0]
        bucket = self.param("bucket", "default")

        RebalanceHelper.wait_for_stats_on_all(master, bucket,
                                              'ep_queue_size', 0,
                                              fn=RebalanceHelper.wait_for_stats_no_timeout)
        RebalanceHelper.wait_for_stats_on_all(master, bucket,
                                              'ep_flusher_todo', 0,
                                              fn=RebalanceHelper.wait_for_stats_no_timeout)

        print "[perf.drain] disk write queue has been drained: %s"\
            % time.strftime(PerfDefaults.strftime)

        return time.time()

    def wait_until_repl(self):
        print "[perf.repl] waiting for replication: %s"\
            % time.strftime(PerfDefaults.strftime)

        master = self.input.servers[0]
        bucket = self.param("bucket", "default")

        RebalanceHelper.wait_for_stats_on_all(master, bucket,
            'vb_replica_queue_size', 0,
            fn=RebalanceHelper.wait_for_stats_no_timeout)

        RebalanceHelper.wait_for_stats_on_all(master, bucket,
            'ep_tap_replica_queue_itemondisk', 0,
            fn=RebalanceHelper.wait_for_stats_no_timeout)

        RebalanceHelper.wait_for_stats_on_all(master, bucket,
            'ep_tap_rebalance_queue_backfillremaining', 0,
            fn=RebalanceHelper.wait_for_stats_no_timeout)

        RebalanceHelper.wait_for_stats_on_all(master, bucket,
            'ep_tap_replica_qlen', 0,
            fn=RebalanceHelper.wait_for_stats_no_timeout)

        print "[perf.repl] replication is done: %s"\
            % time.strftime(PerfDefaults.strftime)

    def warmup(self, collect_stats=True, flush_os_cache=False):
        """
        Restart cluster and wait for it to warm up.
        In the current version, this affects the master node only.
        """
        if not self.input.servers:
            print "[warmup error] empty server list"
            return

        if collect_stats:
            client_id = self.parami("prefix", 0)
            test_params = {'test_time': time.time(),
                           'test_name': self.id(),
                           'json': 0}
            sc = self.start_stats(self.spec_reference + ".warmup",
                                  test_params=test_params,
                                  client_id=client_id)

        print "[warmup] preparing to warmup cluster ..."

        server = self.input.servers[0]
        shell = RemoteMachineShellConnection(server)

        start_time = time.time()

        print "[warmup] stopping couchbase ... ({0}, {1})"\
            .format(server.ip, time.strftime(PerfDefaults.strftime))
        shell.stop_couchbase()
        print "[warmup] couchbase stopped ({0}, {1})"\
            .format(server.ip, time.strftime(PerfDefaults.strftime))

        if flush_os_cache:
            print "[warmup] flushing os cache ..."
            shell.flush_os_caches()

        shell.start_couchbase()
        print "[warmup] couchbase restarted ({0}, {1})"\
            .format(server.ip, time.strftime(PerfDefaults.strftime))

        self.wait_until_warmed_up()
        print "[warmup] warmup finished"

        end_time = time.time()
        ops = {'tot-sets': 0,
               'tot-gets': 0,
               'tot-items': 0,
               'tot-creates': 0,
               'tot-misses': 0,
               "start-time": start_time,
               "end-time": end_time}

        if collect_stats:
            self.end_stats(sc, ops, self.spec_reference + ".warmup")

    def wait_until_warmed_up(self, master=None):
        if not master:
            master = self.input.servers[0]

        bucket = self.param("bucket", "default")

        fn = RebalanceHelper.wait_for_mc_stats_no_timeout
        for bucket in self.buckets:
            RebalanceHelper.wait_for_stats_on_all(master, bucket,
                                                  'ep_warmup_thread',
                                                  'complete', fn=fn)

    def param(self, name, default_value):
        input = getattr(self, "input", TestInputSingleton.input)
        return input.test_params.get(name, default_value)

    def parami(self, name, default_int):
        return int(self.param(name, default_int))

    def paramf(self, name, default_float):
        return float(self.param(name, default_float))

    def params(self, name, default_str):
        return str(self.param(name, default_str))
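
    # Hedged usage sketch (not part of the original source): the param*()
    # helpers read named values from the test's input parameters (falling back
    # to TestInputSingleton.input) and coerce them to the requested type. The
    # parameter names below are illustrative assumptions, not fixed keys.
    #
    #   bucket = self.param("bucket", "default")     # raw value or default
    #   clients = self.parami("num_clients", 1)      # coerced to int
    #   backoff = self.paramf("obs_backoff", 0.2)    # coerced to float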
示例#24
class MultiNodesUpgradeTests(NewUpgradeBaseTest):
    def setUp(self):
        super(MultiNodesUpgradeTests, self).setUp()
        if self.initial_version.startswith("1.6") or self.initial_version.startswith("1.7"):
            self.product = "membase-server"
        else:
            self.product = "couchbase-server"
        self.initial_num_servers = self.input.param("initial_num_servers", 2)

    def tearDown(self):
        super(MultiNodesUpgradeTests, self).tearDown()

    def offline_cluster_upgrade(self):
        self._install(self.servers[: self.initial_num_servers])
        self.operations(multi_nodes=True)
        upgrade_versions = self.input.param("upgrade_version", "2.0.0-1870-rel")
        upgrade_versions = upgrade_versions.split(";")
        self.log.info("Installation done going to sleep for %s sec", self.sleep_time)
        time.sleep(self.sleep_time)
        for upgrade_version in upgrade_versions:
            for server in self.servers[: self.initial_num_servers]:
                remote = RemoteMachineShellConnection(server)
                remote.stop_server()
                time.sleep(self.sleep_time)
                remote.disconnect()
            for server in self.servers[: self.initial_num_servers]:
                remote = RemoteMachineShellConnection(server)
                self._upgrade(upgrade_version, server, remote)
                time.sleep(self.sleep_time)
                remote.disconnect()
            time.sleep(self.expire_time)
            self.num_servers = self.initial_num_servers
            self.verification(multi_nodes=True)

    def online_upgrade_rebalance_in_out(self):
        self._install(self.servers[: self.initial_num_servers])
        self.operations(multi_nodes=True)
        self.log.info("Installation of old version is done. Wait for %s sec for upgrade" % (self.sleep_time))
        time.sleep(self.sleep_time)
        upgrade_version = self.input.param("upgrade_version", "2.0.0-1870-rel")
        self.initial_version = upgrade_version
        self.product = "couchbase-server"
        self._install(self.servers[self.initial_num_servers : self.num_servers])
        self.log.info("Installation of new version is done. Wait for %s sec for rebalance" % (self.sleep_time))
        time.sleep(self.sleep_time)

        servers_in = self.servers[self.initial_num_servers : self.num_servers]
        self.cluster.rebalance(self.servers[: self.initial_num_servers], servers_in, [])
        self.log.info("Rebalance in all 2.0 Nodes")
        time.sleep(self.sleep_time)
        status, content = ClusterHelper.find_orchestrator(self.master)
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".format(status, content))
        FIND_MASTER = False
        for new_server in servers_in:
            if content.find(new_server.ip) >= 0:
                FIND_MASTER = True
                self.log.info("2.0 Node %s becomes the master" % (new_server.ip))
        if not FIND_MASTER:
            raise Exception("After rebalancing in 2.0 nodes, no 2.0 node became the master")

        servers_out = self.servers[: self.initial_num_servers]
        self.cluster.rebalance(self.servers[: self.num_servers], [], servers_out)
        self.log.info("Rebalance out all old version nodes")
        time.sleep(self.sleep_time)
        self.verify_upgrade_rebalance_in_out()

    def verify_upgrade_rebalance_in_out(self):
        self.master = self.servers[self.initial_num_servers]
        self.rest = RestConnection(self.master)
        self.rest_helper = RestHelper(self.rest)
        for bucket in self.buckets:
            if not self.rest_helper.bucket_exists(bucket.name):
                raise Exception("bucket %s not found" % bucket.name)
        if self.op_types == "bucket":
            bucketinfo = self.rest.get_bucket(bucket.name)
            self.log.info("bucket info :- %s" % bucketinfo)
        if self.op_types == "data":
            self._wait_for_stats_all_buckets(self.servers[self.initial_num_servers : self.num_servers])
            self._verify_all_buckets(self.master, 1, self.wait_timeout * 50, self.max_verify, True, 1)
            self._verify_stats_all_buckets(self.servers[self.initial_num_servers : self.num_servers])

    def online_upgrade_swap_rebalance(self):
        self._install(self.servers[: self.initial_num_servers])
        self.operations(multi_nodes=True)
        self.log.info("Installation of old version is done. Wait for %s sec for upgrade" % (self.sleep_time))
        time.sleep(self.sleep_time)
        upgrade_version = self.input.param("upgrade_version", "2.0.0-1870-rel")
        self.initial_version = upgrade_version
        self.product = "couchbase-server"
        self._install(self.servers[self.initial_num_servers : self.num_servers])
        self.log.info("Installation of new version is done. Wait for %s sec for rebalance" % (self.sleep_time))
        time.sleep(self.sleep_time)

        self.swap_num_servers = self.input.param("swap_num_servers", 1)
        old_servers = self.servers[: self.initial_num_servers]
        new_servers = []
        for i in range(self.initial_num_servers / self.swap_num_servers):
            servers_in = self.servers[
                (self.initial_num_servers + i * self.swap_num_servers) : (
                    self.initial_num_servers + (i + 1) * self.swap_num_servers
                )
            ]
            servers_out = self.servers[(i * self.swap_num_servers) : ((i + 1) * self.swap_num_servers)]
            servers = old_servers + new_servers
            self.cluster.rebalance(servers, servers_in, servers_out)
            self.log.info(
                "Swap rebalance: rebalance out %s old version nodes, rebalance in %s 2.0 Nodes"
                % (self.swap_num_servers, self.swap_num_servers)
            )
            time.sleep(self.sleep_time)
            old_servers = self.servers[((i + 1) * self.swap_num_servers) : self.initial_num_servers]
            new_servers = new_servers + servers_in
            servers = old_servers + new_servers
            status, content = ClusterHelper.find_orchestrator(servers[0])
            self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".format(status, content))
            FIND_MASTER = False
            for new_server in new_servers:
                if content.find(new_server.ip) >= 0:
                    FIND_MASTER = True
                    self.log.info("2.0 Node %s becomes the master" % (new_server.ip))
            if not FIND_MASTER:
                raise Exception("After rebalancing in 2.0 nodes, no 2.0 node became the master")

        self.verify_upgrade_rebalance_in_out()
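
    # Worked example (added for clarity, not part of the original test): with
    # initial_num_servers=4, swap_num_servers=2 and num_servers=8, the loop
    # above performs two swap rebalances:
    #   i=0: rebalance in servers[4:6], rebalance out servers[0:2]
    #   i=1: rebalance in servers[6:8], rebalance out servers[2:4]
    # After each pass the orchestrator is re-checked, since an upgraded node
    # is expected to take over as master.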