Example #1
 def _download_node_cert(self,server):
     cli_command = 'ssl-manage'
     options = "--node-cert-info"
     remote_client = RemoteMachineShellConnection(server)
     output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \
                 options=options, cluster_host=server.ip + ":8091", user=self.ldapUser, password=self.ldapPass)
     return output, error
 def test_oom_kv_restart(self):
     """
     1. Get indexer to OOM.
     2. Stop Couchbase on one of the KV nodes.
     3. Get indexer out of OOM.
     4. Query - Should Fail
     5. Start Couchbase on that KV node.
     6. Query - Should pass
     :return:
     """
     self.assertTrue(self._push_indexer_off_the_cliff(), "OOM Can't be achieved")
     kv_node = self.get_nodes_from_services_map(service_type="kv", get_all_nodes=True)[1]
     log.info("Stopping Couchbase on {0}".format(kv_node.ip))
     remote = RemoteMachineShellConnection(kv_node)
     remote.stop_server()
     for i in range(len(self.load_query_definitions)):
         for bucket in self.buckets:
             log.info("Dropping {0} from bucket {1}".format(self.load_query_definitions[i].index_name, bucket.name))
             self.drop_index(bucket=bucket, query_definition=self.load_query_definitions[i])
             self.sleep(120)
         check = self._validate_indexer_status_oom()
         if not check:
             log.info("Indexer out of OOM...")
             self.load_query_definitions = self.load_query_definitions[i+1:]
             break
         self.sleep(20)
     try:
         self._verify_bucket_count_with_index_count(self.load_query_definitions)
         self.multi_query_using_index(buckets=self.buckets,
                                                   query_definitions=self.load_query_definitions)
     except Exception, ex:
         log.info(str(ex))
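A minimal usage sketch for the _download_node_cert helper above, assuming a hypothetical test method and the suite's usual self.servers list; the assertions on output and error are illustrative only, not part of the original test.
 def test_node_cert_info_is_returned(self):
     # Hypothetical driver for _download_node_cert: run the CLI against every
     # node and check that it produced output and reported no errors.
     for server in self.servers:
         output, error = self._download_node_cert(server)
         self.assertTrue(output, "ssl-manage --node-cert-info returned no output for {0}".format(server.ip))
         self.assertFalse(error, "ssl-manage --node-cert-info reported errors for {0}: {1}".format(server.ip, error))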
Example #3
 def changePathWindows(self, path):
     shell = RemoteMachineShellConnection(self.master)
     os_type = shell.extract_remote_info().distribution_type
     self.log.info ("OS type is {0}".format(os_type))
     if os_type == 'windows':
         path = path.replace("/", "\\")
     return path
Example #4
 def _retrieve_cluster_cert_extended(self,server):
     cli_command = 'ssl-manage'
     options = "--cluster-cert-info --extended"
     remote_client = RemoteMachineShellConnection(server)
     output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \
                 options=options, cluster_host="localhost", user=self.ldapUser, password=self.ldapPass)
     return output, error
    def rebalance_in_out_at_once_persistence_stopped(self):
        num_nodes_with_stopped_persistence = self.input.param("num_nodes_with_stopped_persistence", 1)
        servs_init = self.servers[:self.nodes_init]
        servs_in = [self.servers[i + self.nodes_init] for i in range(self.nodes_in)]
        servs_out = [self.servers[self.nodes_init - i - 1] for i in range(self.nodes_out)]
        rest = RestConnection(self.master)
        self._wait_for_stats_all_buckets(servs_init)
        for server in servs_init[:min(num_nodes_with_stopped_persistence, self.nodes_init)]:
            shell = RemoteMachineShellConnection(server)
            for bucket in self.buckets:
                shell.execute_cbepctl(bucket, "stop", "", "", "")
        self.sleep(5)
        self.num_items_without_persistence = self.input.param("num_items_without_persistence", 100000)
        gen_extra = BlobGenerator('mike', 'mike-', self.value_size, start=self.num_items / 2,
                                  end=self.num_items / 2 + self.num_items_without_persistence)
        self.log.info("current nodes : {0}".format([node.id for node in rest.node_statuses()]))
        self.log.info("adding nodes {0} to cluster".format(servs_in))
        self.log.info("removing nodes {0} from cluster".format(servs_out))
        tasks = self._async_load_all_buckets(self.master, gen_extra, "create", 0, batch_size=1000)
        result_nodes = set(servs_init + servs_in) - set(servs_out)
        # wait timeout of 60 min because of MB-7386 (rebalance stuck)
        self.cluster.rebalance(servs_init[:self.nodes_init], servs_in, servs_out, timeout=self.wait_timeout * 60)
        for task in tasks:
            task.result()

        self._wait_for_stats_all_buckets(servs_init[:self.nodes_init - self.nodes_out], \
                                         ep_queue_size=self.num_items_without_persistence * 0.9, ep_queue_size_cond='>')
        self._wait_for_stats_all_buckets(servs_in)
        self._verify_all_buckets(self.master, timeout=None)
        self._verify_stats_all_buckets(result_nodes)
        #verify that curr_items_tot corresponds to sum of curr_items from all nodes
        verified = True
        for bucket in self.buckets:
            verified &= RebalanceHelper.wait_till_total_numbers_match(self.master, bucket)
        self.assertTrue(verified, "Lost items!!! Replication was completed but sum(curr_items) doesn't match curr_items_total")
Example #6
    def initialize(self, params):
        start_time = time.time()
        server = params["server"]
        remote_client = RemoteMachineShellConnection(params["server"])
        replace_127_0_0_1_cmd = "sed -i 's/127.0.0.1/0.0.0.0/g' {0}".format(
            testconstants.COUCHBASE_SINGLE_DEFAULT_INI_PATH)
        o, r = remote_client.execute_command(replace_127_0_0_1_cmd)
        remote_client.log_command_output(o, r)
        remote_client.stop_couchbase()
        remote_client.start_couchbase()
        remote_client.disconnect()
        couchdb_ok = False

        while time.time() < (start_time + 60):
            try:
                couch_ip = "http://{0}:5984/".format(server.ip)
                log.info("connecting to couch @ {0}".format(couch_ip))
                couch = couchdb.Server(couch_ip)
                couch.config()
                # TODO: verify version number and other properties
                couchdb_ok = True
                break
            except Exception as ex:
                msg = "error happened while creating connection to couchbase single server @ {0} , error : {1}"
                log.error(msg.format(server.ip, ex))
            log.info('sleep for 5 seconds before trying again ...')
            time.sleep(5)
        if not couchdb_ok:
            raise Exception("unable to initialize couchbase single server")
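The initialize() method above uses a poll-until-timeout loop to wait for the CouchDB endpoint to come up. A self-contained sketch of the same pattern follows; the wait_for_service name, its arguments, and the generic probe callable are assumptions for illustration, not part of the test framework.
import logging
import time

log = logging.getLogger(__name__)

def wait_for_service(probe, timeout_secs=60, interval_secs=5):
    # Poll probe() until it succeeds or timeout_secs elapses.  probe is any
    # zero-argument callable that raises on failure.
    start_time = time.time()
    while time.time() < start_time + timeout_secs:
        try:
            probe()
            return True
        except Exception as ex:
            log.error("service probe failed: %s", ex)
        time.sleep(interval_secs)
    return False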
Example #7
    def test_AuditEvent(self):
        auditIns = audit(host=self.master)
        ops = self.input.param("ops", None)
        source = 'internal'
        user = '******'
        rest = RestConnection(self.master)
        #status = rest.setAuditSettings(enabled='true')
        auditIns.setAuditEnable('true')
        if (ops in ['enable', 'disable']):
            if ops == 'disable':
                #status = rest.setAuditSettings(enabled='false')
                auditIns.setAuditEnable('false')
            else:
                #status = rest.setAuditSettings(enabled='true')
                auditIns.setAuditEnable('true')

        if ops == 'disable':
            shell = RemoteMachineShellConnection(self.master)
            try:
                result = shell.file_exists(auditIns.getAuditLogPath(), auditIns.AUDITLOGFILENAME)
            finally:
                shell.disconnect()
            self.assertFalse(result, 'Issue with file getting created in new directory')
        else:
            auditIns = audit(host=self.master)
            expectedResults = {"auditd_enabled":auditIns.getAuditStatus(),
                               "descriptors_path":self.changePathWindows(auditIns.getAuditConfigElement('descriptors_path')),
                               "log_path":self.changePathWindows((auditIns.getAuditLogPath())[:-1]), "source":"internal",
                               "user":"******", "rotate_interval":86400, "version":1, 'hostname':self.getHostName(self.master)}
            self.checkConfig(self.AUDITCONFIGRELOAD, self.master, expectedResults)
Example #8
    def setUp(self):
        self._cleanup_nodes = []
        self._failed_nodes = []
        super(LWW_EP_Engine, self).setUp()

        # need to enable set drift counter and get adjusted time for the clients. This is only enabled for the
        # XDCR user so we need to do a bit of a hack by using sed to edit the rbac.json file
        # TODO: implement for Windows

        if self.master.ip != '127.0.0.1' and not LWW_EP_Engine.have_modified_rbac_file:
            # first stop the servers
            for s in self.servers:
                self.stop_server(s)

            CMD =  'sed -i -e \'s/"SET_WITH_META",/"SET_WITH_META","SET_DRIFT_COUNTER_STATE","GET_ADJUSTED_TIME",/\' /opt/couchbase/etc/security/rbac.json'
            # do the sed thing
            for s in self.servers:
                shell = RemoteMachineShellConnection(s)
                shell.execute_command(CMD)
            for s in self.servers:
                self.start_server(s)

            LWW_EP_Engine.have_modified_rbac_file = True
Example #9
    def test_add_remove_autofailover(self):
        rest = RestConnection(self.master)
        serv_out = self.servers[3]
        shell = RemoteMachineShellConnection(serv_out)
        known_nodes = ['ns_1@'+self.master.ip]

        rest.create_bucket(bucket='default', ramQuotaMB=100)
        rest.update_autofailover_settings(True,30)

        x509main(self.master).setup_master()
        x509main().setup_cluster_nodes_ssl(self.servers[1:4])
        for server in self.servers[1:4]:
            rest.add_node('Administrator','password',server.ip)
            known_nodes.append('ns_1@'+server.ip)

        rest.rebalance(known_nodes)
        self.assertTrue(self.check_rebalance_complete(rest),"Issue with rebalance")

        shell.stop_server()
        self.sleep(60)
        shell.start_server()
        self.sleep(30)
        for server in self.servers:
            status = x509main(server)._validate_ssl_login()
            self.assertEqual(status,200,"Not able to login via SSL code")
Example #10
 def test_queries_after_backup_with_2i(self):
     index_name = "Automation_backup_index"
     method_name = self.input.param('to_run', 'test_any')
     self.couchbase_login_info = "%s:%s" % (self.input.membase_settings.rest_username,
                                            self.input.membase_settings.rest_password)
     self.backup_location = self.input.param("backup_location", "/tmp/backup")
     self.command_options = self.input.param("command_options", '')
     index_field = self.input.param("index_field", '')
     self.assertTrue(index_field, "Index field should be provided")
     for bucket in self.buckets:
         self.run_cbq_query(query="CREATE INDEX %s ON %s(%s) USING GSI" % (index_name, bucket.name, ','.join(index_field.split(';'))))
     try:
         shell = RemoteMachineShellConnection(self.master)
         fn = getattr(self, method_name)
         fn()
         self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, self.command_options)
         fn = getattr(self, method_name)
         fn()
         for bucket in self.buckets:
             self.cluster.bucket_flush(self.master, bucket=bucket)
         self.sleep(5, 'wait some time before restore')
         shell.restore_backupFile(self.couchbase_login_info, self.backup_location, [bucket.name for bucket in self.buckets])
         fn = getattr(self, method_name)
         fn()
     finally:
         for bucket in self.buckets:
             self.run_cbq_query(query="DROP INDEX %s.%s" % (bucket.name, index_name))
Example #11
    def testClusterInitNegative(self):
        cluster_init_username = self.input.param("cluster_init_username", None)
        cluster_init_password = self.input.param("cluster_init_password", None)
        cluster_init_port = self.input.param("cluster_init_port", None)
        cluster_init_ramsize = self.input.param("cluster_init_ramsize", None)
        command_init = self.input.param("command_init", "cluster-init")
        server = self.servers[-1]
        remote_client = RemoteMachineShellConnection(server)
        rest = RestConnection(server)
        rest.force_eject_node()
        self.sleep(5)

        try:
            cli_command = command_init
            options = ""
            if  cluster_init_username is not None:
                options += "--cluster-init-username={0} ".format(cluster_init_username)
            if cluster_init_password is not None:
                options += "--cluster-init-password={0} ".format(cluster_init_password)
            if cluster_init_port is not None:
                options += "--cluster-init-port={0} ".format(cluster_init_port)
            if cluster_init_ramsize is None:
                options += "--cluster-init-ramsize={0} ".format(cluster_init_ramsize)

            output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user=None, password=None)
            self.assertEqual(output[0], 'ERROR: unable to init localhost (400) Bad Request')
            self.assertTrue(output[1] == "[u'Username and password are required.']" or output[1] == "[u'The password must be at least six characters.']")
            remote_client.disconnect()
        finally:
            rest = RestConnection(server)
            rest.force_eject_node()
            self.sleep(5)
            rest.init_cluster()
Example #12
    def _setting_cluster(self, cmd, data_ramsize, index_ramsize, fts_ramsize,
                         cluster_name, cluster_username,
                         cluster_password, cluster_port):
        options = self._get_default_options()
        if cluster_username is not None:
            options += " --cluster-username " + str(cluster_username)
        if cluster_password is not None:
            options += " --cluster-password " + str(cluster_password)
        if data_ramsize:
            options += " --cluster-ramsize " + str(data_ramsize)
        if index_ramsize:
            options += " --cluster-index-ramsize " + str(index_ramsize)
        if fts_ramsize:
            options += " --cluster-fts-ramsize " + str(fts_ramsize)
        if cluster_name:
            options += " --cluster-name " + str(cluster_name)
        if cluster_port:
            options += " --cluster-port " + str(cluster_port)

        remote_client = RemoteMachineShellConnection(self.server)
        stdout, stderr = remote_client.couchbase_cli(cmd, self.hostname,
                                                     options)
        remote_client.disconnect()
        return stdout, stderr, self._was_success(stdout,
                                                 "Cluster settings modified")
Example #13
    def initialize(self, params):
#        log = logger.new_logger("Installer")
        start_time = time.time()
        cluster_initialized = False
        server = params["server"]
        remote_client = RemoteMachineShellConnection(params["server"])
        while time.time() < (start_time + (10 * 60)):
            rest = RestConnection(server)
            try:
                rest.init_cluster(username=server.rest_username, password=server.rest_password)
                rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
                if server.data_path:
                    time.sleep(3)
                    # Make sure that data_path is writable by couchbase user
                    #remote_client.stop_couchbase()
                    remote_client.execute_command('rm -rf {0}/*'.format(server.data_path))
                    remote_client.execute_command("chown -R couchbase:couchbase {0}".format(server.data_path))
                    rest.set_data_path(data_path=server.data_path)
                    # Symlink data-dir to custom path
                    #remote_client.execute_command('mv /opt/couchbase/var {0}'.format(server.data_path))
                    #remote_client.execute_command('ln -s {0}/var /opt/couchbase/var'.format(server.data_path))
                    #remote_client.execute_command("chown -h couchbase:couchbase /opt/couchbase/var")
                    #remote_client.start_couchbase()
                    time.sleep(3)
                cluster_initialized = True
                break
            except ServerUnavailableException:
                log.error("error happened while initializing the cluster @ {0}".format(server.ip))
            log.info('sleep for 5 seconds before trying again ...')
            time.sleep(5)
        if not cluster_initialized:
            raise Exception("unable to initialize membase node")
Example #14
    def setting_index(self, max_rollbacks, stable_snap_interval,
                      mem_snap_interval, storage_mode, threads,
                      log_level):
        options = self._get_default_options()
        if max_rollbacks:
            options += " --index-max-rollback-points " + str(max_rollbacks)
        if stable_snap_interval:
            options += " --index-stable-snapshot-interval " + str(
                stable_snap_interval)
        if mem_snap_interval:
            options += " --index-memory-snapshot-interval " + str(
                mem_snap_interval)
        if storage_mode:
            options += " --index-storage-setting " + str(storage_mode)
        if threads:
            options += " --index-threads " + str(threads)
        if log_level:
            options += " --index-log-level " + str(log_level)

        remote_client = RemoteMachineShellConnection(self.server)
        stdout, stderr = remote_client.couchbase_cli("setting-index",
                                                     self.hostname, options)
        remote_client.disconnect()
        return stdout, stderr, self._was_success(stdout,
                                                 "Indexer settings modified")
Example #15
    def user_manage(self, delete, list, set, ro_username, ro_password):
        options = self._get_default_options()
        if delete:
            options += " --delete "
        if list:
            options += " --list "
        if set:
            options += " --set "
        if ro_username is not None:
            options += " --ro-username " + str(ro_username)
        if ro_password:
            options += " --ro-password " + str(ro_password)

        remote_client = RemoteMachineShellConnection(self.server)
        stdout, stderr = remote_client.couchbase_cli("user-manage",
                                                     self.hostname, options)
        remote_client.disconnect()

        if delete:
            return stdout, stderr, self._was_success(stdout, "Local read-only "
                                                             "user deleted")
        elif set:
            # Note: the expected success message below appears to be copy-pasted
            # from the delete branch; the CLI most likely reports the read-only
            # user as created or updated for a --set operation.
            return stdout, stderr, self._was_success(stdout, "Local read-only "
                                                             "user deleted")
        else:
            return stdout, stderr, self._no_error_in_output(stdout)
Example #16
 def rebalance_stop(self):
     options = self._get_default_options()
     remote_client = RemoteMachineShellConnection(self.server)
     stdout, stderr = remote_client.couchbase_cli("rebalance-stop",
                                                  self.hostname, options)
     remote_client.disconnect()
     return stdout, stderr, self._was_success(stdout, "Rebalance stopped")
Example #17
    def setting_compaction(self, db_frag_perc, db_frag_size, view_frag_perc,
                           view_frag_size, from_period, to_period,
                           abort_outside, parallel_compact, purgeint):
        options = self._get_default_options()
        if db_frag_perc is not None:
            options += " --compaction-db-percentage " + str(db_frag_perc)
        if db_frag_size is not None:
            options += " --compaction-db-size " + str(db_frag_size)
        if view_frag_perc is not None:
            options += " --compaction-view-percentage " + str(view_frag_perc)
        if view_frag_size is not None:
            options += " --compaction-view-size " + str(view_frag_size)
        if from_period is not None:
            options += " --compaction-period-from " + str(from_period)
        if to_period is not None:
            options += " --compaction-period-to " + str(to_period)
        if abort_outside is not None:
            options += " --enable-compaction-abort " + str(abort_outside)
        if parallel_compact is not None:
            options += " --enable-compaction-parallel " + str(parallel_compact)
        if purgeint is not None:
            options += " --metadata-purge-interval " + str(purgeint)

        remote_client = RemoteMachineShellConnection(self.server)
        stdout, stderr = remote_client.couchbase_cli("setting-compaction",
                                                     self.hostname, options)
        remote_client.disconnect()
        return stdout, stderr, self._was_success(stdout, "Compaction "
                                                         "settings modified")
Example #18
    def bucket_create(self, name, password, bucket_type, quota,
                      eviction_policy, replica_count, enable_replica_indexes,
                      priority, enable_flush, wait):
        options = self._get_default_options()
        if name is not None:
            options += " --bucket " + name
        if password is not None:
            options += " --bucket-password " + password
        if bucket_type is not None:
            options += " --bucket-type " + bucket_type
        if quota is not None:
            options += " --bucket-ramsize " + str(quota)
        if eviction_policy is not None:
            options += " --bucket-eviction-policy " + eviction_policy
        if replica_count is not None:
            options += " --bucket-replica " + str(replica_count)
        if enable_replica_indexes is not None:
            options += " --enable-index-replica " + str(enable_replica_indexes)
        if priority is not None:
            options += " --bucket-priority " + priority
        if enable_flush is not None:
            options += " --enable-flush " + str(enable_flush)
        if wait:
            options += " --wait"

        remote_client = RemoteMachineShellConnection(self.server)
        stdout, stderr = remote_client.couchbase_cli("bucket-create",
                                                     self.hostname, options)
        remote_client.disconnect()
        return stdout, stderr, self._was_success(stdout, "Bucket created")
Example #19
    def cluster_init(self, data_ramsize, index_ramsize, fts_ramsize, services,
                     index_storage_mode, cluster_name,
                     cluster_username, cluster_password, cluster_port):
        options = ""
        if cluster_username:
            options += " --cluster-username " + str(cluster_username)
        if cluster_password:
            options += " --cluster-password " + str(cluster_password)
        if data_ramsize:
            options += " --cluster-ramsize " + str(data_ramsize)
        if index_ramsize:
            options += " --cluster-index-ramsize " + str(index_ramsize)
        if fts_ramsize:
            options += " --cluster-fts-ramsize " + str(fts_ramsize)
        if cluster_name:
            options += " --cluster-name " + str(cluster_name)
        if index_storage_mode:
            options += " --index-storage-setting " + str(index_storage_mode)
        if cluster_port:
            options += " --cluster-port " + str(cluster_port)
        if services:
            options += " --services " + str(services)

        remote_client = RemoteMachineShellConnection(self.server)
        stdout, stderr = remote_client.couchbase_cli("cluster-init",
                                                     self.hostname, options)
        remote_client.disconnect()
        return stdout, stderr, self._was_success(stdout, "Cluster initialized")
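A hedged usage sketch for the cluster_init wrapper above; the memory quotas, service list, credentials, and the self.cli attribute holding an instance of the wrapper class are all assumptions made for illustration.
    def test_cluster_init_basic(self):
        # Illustrative call only: self.cli is assumed to be an instance of the
        # CLI wrapper class that defines cluster_init(); all parameter values
        # are made up for the example.
        stdout, stderr, success = self.cli.cluster_init(
            data_ramsize=1024, index_ramsize=256, fts_ramsize=256,
            services="data,index,query", index_storage_mode="default",
            cluster_name="test_cluster", cluster_username="Administrator",
            cluster_password="password", cluster_port=None)
        self.assertTrue(success, "cluster-init failed: {0} {1}".format(stdout, stderr))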
Example #20
 def test_auto_compaction_with_multiple_buckets(self):
     remote_client = RemoteMachineShellConnection(self.master)
     rest = RestConnection(self.master)
     for bucket in self.buckets:
         if bucket.name == "default":
             self.disable_compaction()
         else:
             self.set_auto_compaction(rest, dbFragmentThresholdPercentage=self.autocompaction_value, bucket=bucket.name)
     self._load_all_buckets(self.master, self.gen_load, "create", 0, 1)
     end_time = time.time() + self.wait_timeout * 30
     for bucket in self.buckets:
         monitor_fragm = self.cluster.async_monitor_db_fragmentation(self.master, self.autocompaction_value, bucket.name)
         while monitor_fragm.state != "FINISHED":
             if end_time < time.time():
                 self.fail("Fragmentation level is not reached in %s sec" % (self.wait_timeout * 30))
             try:
                 self._load_all_buckets(self.servers[0], self.gen_update, "update", 0)
             except Exception, ex:
                 self.log.error("Load cannot be performed: %s" % str(ex))
                 self.fail(ex)
         monitor_fragm.result()
         compact_run = remote_client.wait_till_compaction_end(rest, bucket.name,
                                                                  timeout_in_seconds=(self.wait_timeout * 5))
         if compact_run:
             self.log.info("auto compaction run successfully")
Example #21
    def print_dataStorage_content(servers):
        """Print out the contents of the data and index path folders."""
        # Determine whether this is a cluster_run or not
        cluster_run = True

        firstIp = servers[0].ip
        if len(servers) == 1 and servers[0].port == '8091':
            cluster_run = False
        else:
            for node in servers:
                if node.ip != firstIp:
                    cluster_run = False
                    break

        for serverInfo in servers:
            node = RestConnection(serverInfo).get_nodes_self()
            paths = set([node.storage[0].path, node.storage[0].index_path])
            for path in paths:
                if "c:/Program Files" in path:
                    path = path.replace("c:/Program Files", "/cygdrive/c/Program Files")

                if cluster_run:
                    call(["ls", "-lR", path])
                else:
                    log.info("Logging only the total number of files; a full "
                             "listing would flood the test log.")
                    shell = RemoteMachineShellConnection(serverInfo)
                    #o, r = shell.execute_command("ls -LR '{0}'".format(path))
                    o, r = shell.execute_command("wc -l '{0}'".format(path))
                    shell.log_command_output(o, r)
Example #22
 def rebalance_in_out_with_auto_DB_compaction(self):
     remote_client = RemoteMachineShellConnection(self.master)
     rest = RestConnection(self.master)
     self.assertTrue(self.num_servers > self.nodes_in + self.nodes_out,
                         "ERROR: Not enough nodes to do rebalance in and out")
     servs_init = self.servers[:self.nodes_init]
     servs_in = [self.servers[i + self.nodes_init] for i in range(self.nodes_in)]
     servs_out = [self.servers[self.nodes_init - i - 1] for i in range(self.nodes_out)]
     result_nodes = set(servs_init + servs_in) - set(servs_out)
     self.set_auto_compaction(rest, dbFragmentThresholdPercentage=self.autocompaction_value)
     self._load_all_buckets(self.master, self.gen_load, "create", 0, 1)
     rebalance = self.cluster.async_rebalance(servs_init, servs_in, servs_out)
     while rebalance.state != "FINISHED":
         self._monitor_DB_fragmentation()
         compact_run = remote_client.wait_till_compaction_end(rest, self.default_bucket_name,
                                                              timeout_in_seconds=(self.wait_timeout * 5))
     rebalance.result()
     monitor_fragm = self.cluster.async_monitor_db_fragmentation(self.master, 0, self.default_bucket_name)
     result = monitor_fragm.result()
     if compact_run:
         self.log.info("auto compaction run successfully")
     elif result:
         self.log.info("Compaction is already completed")
     else:
         self.fail("auto compaction does not run")
     self.verify_cluster_stats(result_nodes)
     remote_client.disconnect()
 def test_upgrade_negative(self):
     op = self.input.param("op", None)
     error = self.input.param("error", '')
     remote = RemoteMachineShellConnection(self.master)
     if op is None:
         self.fail("operation should be specified")
     if op == "higher_version":
         tmp = self.initial_version
         self.initial_version = self.upgrade_versions[0]
         self.upgrade_versions = [tmp, ]
     info = None
     if op == "wrong_arch":
         info = remote.extract_remote_info()
         info.architecture_type = ('x86_64', 'x86')[info.architecture_type == 'x86']
     self._install([self.master])
     self.operations([self.master])
     try:
         if op == "close_port":
             RemoteUtilHelper.enable_firewall(self.master)
         for upgrade_version in self.upgrade_versions:
             self.sleep(self.sleep_time, "Pre-setup of old version is done. Wait for upgrade to {0} version".\
                    format(upgrade_version))
             output, upgrade_error = self._upgrade(upgrade_version, self.master, info=info)
             # Keep the expected error string from the test params intact instead
             # of shadowing it with the upgrade's stderr.
             if str(output).find(error) != -1 or str(upgrade_error).find(error) != -1:
                 raise Exception(error)
     except Exception, ex:
         self.log.info("Exception %s appeared as expected" % ex)
         self.log.info("Check that old version is working fine")
         self.verification([self.master])
    def test_upgrade(self):
        self._install([self.master])
        self.operations([self.master])
        for upgrade_version in self.upgrade_versions:
            self.sleep(self.sleep_time, "Pre-setup of old version is done. Wait for upgrade to {0} version".\
                       format(upgrade_version))
            upgrade_threads = self._async_update(upgrade_version, [self.master])
            #wait upgrade statuses
            for upgrade_thread in upgrade_threads:
                upgrade_thread.join()
            success_upgrade = True
            while not self.queue.empty():
                success_upgrade &= self.queue.get()
            if not success_upgrade:
                self.fail("Upgrade failed!")


            self.sleep(self.expire_time)
#            if not self.is_linux:
#                self.wait_node_restarted(self.master, wait_time=1200, wait_if_warmup=True, check_service=True)
            remote = RemoteMachineShellConnection(self.master)
            for bucket in self.buckets:
                remote.execute_cbepctl(bucket, "", "set flush_param", "exp_pager_stime", 5)
            remote.disconnect()
            self.sleep(30)
            self.verification([self.master])
 def offline_cluster_upgrade_with_reinstall(self):
     self._install(self.servers[:self.nodes_init])
     self.operations(self.servers[:self.nodes_init])
     if self.ddocs_num:
         self.create_ddocs_and_views()
     if self.during_ops:
         for opn in self.during_ops:
             getattr(self, opn)()
     num_nodes_reinstall = self.input.param('num_nodes_reinstall', 1)
     stoped_nodes = self.servers[self.nodes_init - (self.nodes_init - num_nodes_reinstall):self.nodes_init]
     nodes_reinstall = self.servers[:num_nodes_reinstall]
     for upgrade_version in self.upgrade_versions:
         self.sleep(self.sleep_time, "Pre-setup of old version is done. Wait for upgrade to {0} version".\
                    format(upgrade_version))
         for server in stoped_nodes:
             remote = RemoteMachineShellConnection(server)
             remote.stop_server()
             remote.disconnect()
         self.sleep(self.sleep_time)
         upgrade_threads = self._async_update(upgrade_version, stoped_nodes)
         self.force_reinstall(nodes_reinstall)
         for upgrade_thread in upgrade_threads:
             upgrade_thread.join()
         success_upgrade = True
         while not self.queue.empty():
             success_upgrade &= self.queue.get()
         if not success_upgrade:
             self.fail("Upgrade failed!")
         self.dcp_rebalance_in_offline_upgrade_from_version2_to_version3()
         self.verification(self.servers[:self.nodes_init])
Example #26
    def change_erlang_threads_values(servers, sync_threads=True, num_threads="16:16"):
        """Change the type of Erlang threads (sync or async) and their count
           sync_threads=True means sync threads +S with default threads number equal 16:16
           sync_threads=False means async threads: +A 16, for instance

        Default: +S 16:16
        """
        log = logger.Logger.get_logger()
        for server in servers:
            sh = RemoteMachineShellConnection(server)
            product = "membase"
            if sh.is_couchbase_installed():
                product = "couchbase"

            sync_type = sync_threads and "S" or "A"

            command = "sed -i 's/+[A,S] .*/+%s %s \\\/g' /opt/%s/bin/%s-server" % (
                sync_type,
                num_threads,
                product,
                product,
            )
            o, r = sh.execute_command(command)
            sh.log_command_output(o, r)
            msg = "modified erlang +%s to %s for server %s"
            log.info(msg % (sync_type, num_threads, server.ip))
 def offline_cluster_upgrade_and_rebalance(self):
     num_stoped_nodes = self.input.param('num_stoped_nodes', self.nodes_init)
     stoped_nodes = self.servers[self.nodes_init - num_stoped_nodes :self.nodes_init]
     servs_out = self.servers[self.nodes_init - num_stoped_nodes - self.nodes_out :self.nodes_init - num_stoped_nodes]
     servs_in = self.servers[self.nodes_init:self.nodes_init + self.nodes_in]
     self._install(self.servers)
     self.operations(self.servers[:self.nodes_init])
     if self.ddocs_num:
         self.create_ddocs_and_views()
     if self.during_ops:
         for opn in self.during_ops:
             getattr(self, opn)()
     for upgrade_version in self.upgrade_versions:
         self.sleep(self.sleep_time, "Pre-setup of old version is done. Wait for upgrade to {0} version".\
                    format(upgrade_version))
         for server in stoped_nodes:
             remote = RemoteMachineShellConnection(server)
             remote.stop_server()
             remote.disconnect()
         upgrade_threads = self._async_update(upgrade_version, stoped_nodes)
         try:
             self.cluster.rebalance(self.servers[:self.nodes_init], servs_in, servs_out)
         except RebalanceFailedException:
             self.log.info("rebalance failed as expected")
         for upgrade_thread in upgrade_threads:
             upgrade_thread.join()
         success_upgrade = True
         while not self.queue.empty():
             success_upgrade &= self.queue.get()
         if not success_upgrade:
             self.fail("Upgrade failed!")
         ClusterOperationHelper.wait_for_ns_servers_or_assert(stoped_nodes, self)
         self.cluster.rebalance(self.servers[:self.nodes_init], [], servs_out)
         self.dcp_rebalance_in_offline_upgrade_from_version2_to_version3()
         self.verification(list(set(self.servers[:self.nodes_init] + servs_in) - set(servs_out)))
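The change_erlang_threads_values helper at the start of this example rewrites the +S/+A flags in the couchbase-server start script, which only takes effect after a restart. A minimal sketch of driving it and restarting the servers follows; calling the helper as a plain function and the test method name are assumptions for illustration.
 def switch_to_async_erlang_threads(self, num_threads="16"):
     # Hypothetical driver: rewrite the +A flag on every server, then restart
     # Couchbase so the new Erlang thread settings are picked up.
     change_erlang_threads_values(self.servers, sync_threads=False, num_threads=num_threads)
     for server in self.servers:
         shell = RemoteMachineShellConnection(server)
         shell.stop_couchbase()
         shell.start_couchbase()
         shell.disconnect()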
Example #28
 def verify_for_recovery_type(self, chosen = [], serverMap = {}, buckets = [], recoveryTypeMap = {}, fileMap = {}, deltaRecoveryBuckets = []):
     """ Verify recovery type is delta or full """
     summary = ""
     logic = True
     for server in self.chosen:
         shell = RemoteMachineShellConnection(serverMap[server.ip])
         os_type = shell.extract_remote_info()
         if os_type.type.lower() == 'windows':
             return
         for bucket in buckets:
             path = fileMap[server.ip][bucket.name]
             exists = shell.file_exists(path,"check.txt")
             if deltaRecoveryBuckets != None:
                 if recoveryTypeMap[server.ip] == "delta" and (bucket.name in deltaRecoveryBuckets) and not exists:
                     logic = False
                     summary += "\n Failed Condition :: node {0}, bucket {1} :: Expected Delta, Actual Full".format(server.ip,bucket.name)
                 elif recoveryTypeMap[server.ip] == "delta" and (bucket.name not in deltaRecoveryBuckets) and exists:
                     summary += "\n Failed Condition :: node {0}, bucket {1} :: Expected Full, Actual Delta".format(server.ip,bucket.name)
                     logic = False
             else:
                 if recoveryTypeMap[server.ip] == "delta"  and not exists:
                     logic = False
                     summary += "\n Failed Condition :: node {0}, bucket {1} :: Expected Delta, Actual Full".format(server.ip,bucket.name)
                 elif recoveryTypeMap[server.ip] == "full" and exists:
                     logic = False
                     summary += "\n Failed Condition :: node {0}, bucket {1}  :: Expected Full, Actual Delta".format(server.ip,bucket.name)
         shell.disconnect()
     self.assertTrue(logic, summary)
    def test_full_eviction_changed_to_value_eviction(self):

        KEY_NAME = 'key1'

        gen_create = BlobGenerator('eviction', 'eviction-', self.value_size, end=self.num_items)
        gen_create2 = BlobGenerator('eviction2', 'eviction2-', self.value_size, end=self.num_items)
        self._load_all_buckets(self.master, gen_create, "create", 0)

        self._wait_for_stats_all_buckets(self.servers[:self.nodes_init])
        self._verify_stats_all_buckets(self.servers[:self.nodes_init])
        remote = RemoteMachineShellConnection(self.master)
        for bucket in self.buckets:
            output, _ = remote.execute_couchbase_cli(cli_command='bucket-edit',
                                                         cluster_host="localhost",
                                                         user=self.master.rest_username,
                                                         password=self.master.rest_password,
                                                         options='--bucket=%s --bucket-eviction-policy=valueOnly' % bucket.name)
            self.assertTrue(' '.join(output).find('SUCCESS') != -1, 'Eviction policy wasn\'t changed')
        ClusterOperationHelper.wait_for_ns_servers_or_assert(
                                            self.servers[:self.nodes_init], self,
                                            wait_time=self.wait_timeout, wait_if_warmup=True)
        self.sleep(10, 'Wait some time before next load')
        #self._load_all_buckets(self.master, gen_create2, "create", 0)
        #import pdb;pdb.set_trace()


        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, 'default')
        mcd = client.memcached(KEY_NAME)
        try:
            rc = mcd.set(KEY_NAME, 0,0, json.dumps({'value':'value2'}))
            self.fail('Bucket is incorrectly functional')
        except MemcachedError, e:
            pass   # this is the exception we are hoping for
Example #30
 def rebalance_in_with_DB_time_compaction(self):
     remote_client = RemoteMachineShellConnection(self.master)
     rest = RestConnection(self.master)
     currTime = datetime.datetime.now()
     fromTime = currTime + datetime.timedelta(hours=1)
     toTime = currTime + datetime.timedelta(hours=24)
     self.set_auto_compaction(rest, dbFragmentThresholdPercentage=self.autocompaction_value, allowedTimePeriodFromHour=fromTime.hour,
                              allowedTimePeriodFromMin=fromTime.minute, allowedTimePeriodToHour=toTime.hour, allowedTimePeriodToMin=toTime.minute,
                              allowedTimePeriodAbort="false")
     self._load_all_buckets(self.master, self.gen_load, "create", 0, 1)
     self._monitor_DB_fragmentation()
     for i in xrange(10):
         active_tasks = self.cluster.async_monitor_active_task(self.master, "bucket_compaction", "bucket", wait_task=False)
         for active_task in active_tasks:
             result = active_task.result()
             self.assertTrue(result)
             self.sleep(2)
     currTime = datetime.datetime.now()
     #Need to make it configurable
     newTime = currTime + datetime.timedelta(minutes=5)
     self.set_auto_compaction(rest, dbFragmentThresholdPercentage=self.autocompaction_value, allowedTimePeriodFromHour=currTime.hour,
                              allowedTimePeriodFromMin=currTime.minute, allowedTimePeriodToHour=newTime.hour, allowedTimePeriodToMin=newTime.minute,
                              allowedTimePeriodAbort="false")
     servs_in = self.servers[self.nodes_init:self.nodes_in + 1]
     rebalance = self.cluster.async_rebalance([self.master], servs_in, [])
     compact_run = remote_client.wait_till_compaction_end(rest, self.default_bucket_name,
                                                                  timeout_in_seconds=(self.wait_timeout * 5))
     rebalance.result()
     if compact_run:
         self.log.info("auto compaction run successfully")
     else:
         self.fail("auto compaction does not run")
     remote_client.disconnect()
Example #31
class QueryWhitelistTests(QueryTests):
    def setUp(self):
        super(QueryWhitelistTests, self).setUp()
        self.shell = RemoteMachineShellConnection(self.master)
        self.info = self.shell.extract_remote_info()
        if self.info.type.lower() == 'windows':
            self.curl_path = "%scurl" % self.path
            self.file_path = "Filec:\\ProgramFiles\\Couchbase\\Server\\bin\\..\\var\\lib\\couchbase\\n1qlcerts\\curl_whitelist"
            self.lowercase_file_path = "filec:\\ProgramFiles\\Couchbase\\Server\\bin\\..\\var\\lib\\couchbase\\n1qlcerts\\curl_whitelist"
        else:
            self.curl_path = "curl"
            self.file_path = "File/opt/couchbase/bin/../var/lib/couchbase/n1qlcerts/curl_whitelist"
            self.lowercase_file_path = "file/opt/couchbase/bin/../var/lib/couchbase/n1qlcerts/curl_whitelist"
        self.rest = RestConnection(self.master)
        self.cbqpath = '%scbq' % self.path + " -e %s:%s -q -u %s -p %s"\
                                             % (self.master.ip, self.n1ql_port, self.rest.username, self.rest.password)
        #Whitelist error messages
        self.query_error_msg = "Errorevaluatingprojection.-cause:URLendpointisntwhitelistedhttp://%s:%s/query/service." \
                "PleasemakesuretowhitelisttheURLontheUI." % (self.master.ip, self.n1ql_port)
        self.jira_error_msg ="Errorevaluatingprojection.-cause:URLendpointisntwhitelistedhttps://jira.atlassian." \
                             "com/rest/api/latest/issue/JRA-9.PleasemakesuretowhitelisttheURLontheUI."
        self.google_error_msg = "Errorevaluatingprojection.-cause:URLendpointisntwhitelisted" \
                                "https://maps.googleapis.com/maps/api/geocode/json."
        #End of whitelist error messages
        self.query_service_url = "'http://%s:%s/query/service'" % (
            self.master.ip, self.n1ql_port)
        self.api_port = self.input.param("api_port", 8094)
        self.load_sample = self.input.param("load_sample", False)
        self.create_users = self.input.param("create_users", False)
        self.full_access = self.input.param("full_access", True)
        self.run_cbq_query('delete from system:prepareds')

    def suite_setUp(self):
        super(QueryWhitelistTests, self).suite_setUp()
        # Create the users necessary for the RBAC tests in curl
        if self.create_users:
            testuser = [{
                'id': 'no_curl',
                'name': 'no_curl',
                'password': '******'
            }, {
                'id': 'curl',
                'name': 'curl',
                'password': '******'
            }, {
                'id': 'curl_no_insert',
                'name': 'curl_no_insert',
                'password': '******'
            }]
            RbacBase().create_user_source(testuser, 'builtin', self.master)

            noncurl_permissions = 'bucket_full_access[*]:query_select[*]:query_update[*]:' \
                                  'query_insert[*]:query_delete[*]:query_manage_index[*]:' \
                                  'query_system_catalog'
            curl_permissions = 'bucket_full_access[*]:query_select[*]:query_update[*]:' \
                               'query_insert[*]:query_delete[*]:query_manage_index[*]:' \
                               'query_system_catalog:query_external_access'
            # Assign user to role
            role_list = [{
                'id': 'no_curl',
                'name': 'no_curl',
                'roles': '%s' % noncurl_permissions
            }, {
                'id': 'curl',
                'name': 'curl',
                'roles': '%s' % curl_permissions
            }]
            temp = RbacBase().add_user_role(role_list, self.rest, 'builtin')

    def tearDown(self):
        super(QueryWhitelistTests, self).tearDown()

    def suite_tearDown(self):
        super(QueryWhitelistTests, self).suite_tearDown()

    '''Test running a curl command without a whitelist present'''

    def test_no_whitelist(self):
        # The query that curl will send to couchbase
        n1ql_query = 'select * from default limit 5'
        # This is the query that the cbq-engine will execute
        query = "select curl(" + self.query_service_url + \
                ", {'data' : 'statement=%s','user':'******'})" % (
                n1ql_query, self.username, self.password)
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        json_curl = self.convert_to_json(curl)
        self.assertTrue(
            self.query_error_msg in json_curl['errors'][0]['msg'],
            "Error message is %s this is incorrect it should be %s" %
            (json_curl['errors'][0]['msg'], self.query_error_msg))

    '''Test running a curl command with an empty whitelist'''

    def test_empty_whitelist(self):
        response, content = self.rest.create_whitelist(self.master, {})
        result = json.loads(content)
        self.assertEqual(result['errors']['all_access'],
                         'The value must be supplied')
        n1ql_query = 'select * from default limit 5'

        # This is the query that the cbq-engine will execute
        query = "select curl(" + self.query_service_url + \
                ", {'data' : 'statement=%s','user':'******'})" % (
                n1ql_query, self.username, self.password)
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        json_curl = self.convert_to_json(curl)
        self.assertTrue(
            self.query_error_msg in json_curl['errors'][0]['msg'],
            "Error message is %s this is incorrect it should be %s" %
            (json_curl['errors'][0]['msg'], self.query_error_msg))

        self.rest.create_whitelist(self.master, {
            "all_access": None,
            "allowed_urls": None,
            "disallowed_urls": None
        })
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        json_curl = self.convert_to_json(curl)
        self.assertTrue(
            self.query_error_msg in json_curl['errors'][0]['msg'],
            "Error message is %s this is incorrect it should be %s" %
            (json_curl['errors'][0]['msg'], self.query_error_msg))

    '''Test running a curl command with whitelists that are invalid'''

    def test_invalid_whitelist(self):
        response, content = self.rest.create_whitelist(self.master,
                                                       "thisisnotvalid")
        result = json.loads(content)
        self.assertEqual(result['errors']['_'], 'Unexpected Json')
        n1ql_query = 'select * from default limit 5'
        # This is the query that the cbq-engine will execute
        query = "select curl(" + self.query_service_url + \
                ", {'data' : 'statement=%s','user':'******'})" % (
                n1ql_query, self.username, self.password)
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        json_curl = self.convert_to_json(curl)
        self.assertTrue(
            self.query_error_msg in json_curl['errors'][0]['msg'],
            "Error message is %s this is incorrect it should be %s" %
            (json_curl['errors'][0]['msg'], self.query_error_msg))

        self.rest.create_whitelist(
            self.master, {
                "all_access": "hello",
                "allowed_urls": ["goodbye"],
                "disallowed_urls": ["invalid"]
            })
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        json_curl = self.convert_to_json(curl)
        self.assertTrue(
            self.query_error_msg in json_curl['errors'][0]['msg'],
            "Error message is %s this is incorrect it should be %s" %
            (json_curl['errors'][0]['msg'], self.query_error_msg))

    '''Test running a curl command with a whitelist that contains the field all_access: True and also
       invalid/fake fields'''

    def test_basic_all_access_true(self):
        n1ql_query = 'select * from default limit 5'
        self.rest.create_whitelist(self.master, {"all_access": True})
        query = "select curl(" + self.query_service_url + \
                ", {'data' : 'statement=%s','user':'******'})" % (
                n1ql_query, self.username, self.password)
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        json_curl = self.convert_to_json(curl)
        expected_result = self.run_cbq_query('select * from default limit 5')
        self.assertEqual(json_curl['results'][0]['$1']['results'],
                         expected_result['results'])

        curl_output = self.shell.execute_command(
            "%s https://jira.atlassian.com/rest/api/latest/issue/JRA-9" %
            self.curl_path)
        expected_curl = self.convert_list_to_json(curl_output[0])
        expected_curl['fields']['customfield_10610'] = int(
            expected_curl['fields']['customfield_10610'])
        expected_curl['fields']['comment']['comments'][135]['body'] = \
            expected_curl['fields']['comment']['comments'][135]['body'].replace(u'\xa0', '')
        url = "'https://jira.atlassian.com/rest/api/latest/issue/JRA-9'"
        query = "select curl(" + url + ")"
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertEqual(actual_curl['results'][0]['$1'], expected_curl)

        self.rest.create_whitelist(self.master, {
            "all_access": True,
            "fake_field": "blahahahahaha",
            "fake_url": "fake"
        })

        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertEqual(actual_curl['results'][0]['$1'], expected_curl)

        self.rest.create_whitelist(self.master, {
            "fake_field": "blahahahahaha",
            "all_access": True,
            "fake_url": "fake"
        })

        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertEqual(actual_curl['results'][0]['$1'], expected_curl)

    '''Test all_access: True with nonsense in the allowed/disallowed fields as well as nothing
       in the allowed/disallowed fields'''

    def test_all_access_true(self):
        self.rest.create_whitelist(
            self.master, {
                "all_access": True,
                "allowed_urls": ["blahahahahaha"],
                "disallowed_urls": ["fake"]
            })
        curl_output = self.shell.execute_command(
            "%s https://jira.atlassian.com/rest/api/latest/issue/JRA-9" %
            self.curl_path)
        expected_curl = self.convert_list_to_json(curl_output[0])
        expected_curl['fields']['customfield_10610'] = int(
            expected_curl['fields']['customfield_10610'])
        expected_curl['fields']['comment']['comments'][135]['body'] = \
            expected_curl['fields']['comment']['comments'][135]['body'].replace(u'\xa0', '')
        url = "'https://jira.atlassian.com/rest/api/latest/issue/JRA-9'"
        query = "select curl(" + url + ")"
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertEqual(actual_curl['results'][0]['$1'], expected_curl)

        self.rest.create_whitelist(self.master, {
            "all_access": True,
            "allowed_urls": None,
            "disallowed_urls": None
        })
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertEqual(actual_curl['results'][0]['$1'], expected_curl)

    '''Test what happens if you give a disallowed_urls field as well as an all_access field; all_access
       should take precedence over the disallowed_urls field'''

    def test_all_access_true_disallowed_url(self):
        self.rest.create_whitelist(
            self.master, {
                "all_access": True,
                "disallowed_urls": ["https://maps.googleapis.com"]
            })
        curl_output = self.shell.execute_command(
            "%s --get https://maps.googleapis.com/maps/api/geocode/json "
            "-d 'address=santa+cruz&components=country:ES&key=AIzaSyCT6niGCMsgegJkQSYSqpoLZ4_rSO59XQQ'"
            % self.curl_path)
        expected_curl = self.convert_list_to_json(curl_output[0])
        url = "'https://maps.googleapis.com/maps/api/geocode/json'"
        options = "{'get':True,'data': 'address=santa+cruz&components=country:ES&key=AIzaSyCT6niGCMsgegJkQSYSqpoLZ4_rSO59XQQ'}"
        query = "select curl(" + url + ", %s" % options + ")"
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertEqual(actual_curl['results'][0]['$1'], expected_curl)

    '''Test what happens if you give an allowed_urls field as well as an all_access field; all_access
       should take precedence over the allowed_urls field'''

    def test_all_access_true_allowed_url(self):
        self.rest.create_whitelist(
            self.master, {
                "all_access": True,
                "allowed_urls": ["https://maps.googleapis.com"]
            })
        curl_output = self.shell.execute_command(
            "%s https://jira.atlassian.com/rest/api/latest/issue/JRA-9" %
            self.curl_path)
        expected_curl = self.convert_list_to_json(curl_output[0])
        expected_curl['fields']['customfield_10610'] = int(
            expected_curl['fields']['customfield_10610'])
        expected_curl['fields']['comment']['comments'][135]['body'] = \
            expected_curl['fields']['comment']['comments'][135]['body'].replace(u'\xa0', '')
        url = "'https://jira.atlassian.com/rest/api/latest/issue/JRA-9'"
        query = "select curl(" + url + ")"
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertEqual(actual_curl['results'][0]['$1'], expected_curl)

    '''Test what happens when you set the all_access field multiple times, or try to give it multiple
       values'''

    def test_multiple_all_access(self):
        self.rest.create_whitelist(self.master, {
            "all_access": True,
            "all_access": False
        })

        curl_output = self.shell.execute_command(
            "%s https://jira.atlassian.com/rest/api/latest/issue/JRA-9" %
            self.curl_path)
        expected_curl = self.convert_list_to_json(curl_output[0])
        expected_curl['fields']['customfield_10610'] = int(
            expected_curl['fields']['customfield_10610'])
        expected_curl['fields']['comment']['comments'][135]['body'] = \
            expected_curl['fields']['comment']['comments'][135]['body'].replace(u'\xa0', '')
        url = "'https://jira.atlassian.com/rest/api/latest/issue/JRA-9'"
        query = "select curl(" + url + ")"
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertTrue(
            self.jira_error_msg in actual_curl['errors'][0]['msg'],
            "Error message is %s this is incorrect it should be %s" %
            (actual_curl['errors'][0]['msg'], self.jira_error_msg))
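        # With the duplicate keys the payload now resolves to all_access=True, so the same query should succeed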

        self.rest.create_whitelist(self.master, {
            "all_access": False,
            "all_access": True
        })
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertEqual(actual_curl['results'][0]['$1'], expected_curl)
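        # all_access given as a list is not a valid boolean; the test expects the previously
        # accepted all_access=True to remain in effect, so the query still succeeds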

        self.rest.create_whitelist(self.master, {"all_access": [True, False]})
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertEqual(actual_curl['results'][0]['$1'], expected_curl)

    '''Test to make sure the whitelist enforces that the allowed_urls field must be given as a list of strings'''

    def test_invalid_allowed_url(self):
        self.rest.create_whitelist(self.master, {"all_access": False})
        # The whitelist should not accept this setting and should thus leave the above setting of all_access = False intact
        response, content = self.rest.create_whitelist(
            self.master, {
                "all_access": False,
                "allowed_urls": "blahblahblah"
            })
        result = json.loads(content)
        self.assertEqual(result['errors']['allowed_urls'],
                         "Must be an array of non-empty strings")
        n1ql_query = 'select * from default limit 5'
        # This is the query that the cbq-engine will execute
        query = "select curl(" + self.query_service_url + \
                ", {'data' : 'statement=%s','user':'******'})" % (
                n1ql_query, self.username, self.password)
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        json_curl = self.convert_to_json(curl)
        self.assertTrue(
            self.query_error_msg in json_curl['errors'][0]['msg'],
            "Error message is %s this is incorrect it should be %s" %
            (json_curl['errors'][0]['msg'], self.query_error_msg))

    '''Test the allowed_urls field: run curl against an endpoint not in allowed_urls (should fail), then
       against an endpoint in allowed_urls (should succeed)'''

    def test_allowed_url(self):
        self.rest.create_whitelist(
            self.master, {
                "all_access": False,
                "allowed_urls": ["https://maps.googleapis.com"]
            })

        url = "'https://jira.atlassian.com/rest/api/latest/issue/JRA-9'"
        query = "select curl(" + url + ")"
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertTrue(
            self.jira_error_msg in actual_curl['errors'][0]['msg'],
            "Error message is %s this is incorrect it should be %s" %
            (actual_curl['errors'][0]['msg'], self.jira_error_msg))

        curl_output = self.shell.execute_command(
            "%s --get https://maps.googleapis.com/maps/api/geocode/json "
            "-d 'address=santa+cruz&components=country:ES&key=AIzaSyCT6niGCMsgegJkQSYSqpoLZ4_rSO59XQQ'"
            % self.curl_path)
        expected_curl = self.convert_list_to_json(curl_output[0])
        url = "'https://maps.googleapis.com/maps/api/geocode/json'"
        options = "{'get':True,'data': 'address=santa+cruz&components=country:ES&key=AIzaSyCT6niGCMsgegJkQSYSqpoLZ4_rSO59XQQ'}"
        query = "select curl(" + url + ", %s" % options + ")"
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertEqual(actual_curl['results'][0]['$1'], expected_curl)

    '''Test the disallowed_urls field: run curl against an endpoint that is not listed at all and then
       against an endpoint in disallowed_urls; both should fail'''

    def test_disallowed_url(self):
        self.rest.create_whitelist(
            self.master, {
                "all_access": False,
                "disallowed_urls": ["https://maps.googleapis.com"]
            })

        url = "'https://jira.atlassian.com/rest/api/latest/issue/JRA-9'"
        query = "select curl(" + url + ")"
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertTrue(
            self.jira_error_msg in actual_curl['errors'][0]['msg'],
            "Error message is %s this is incorrect it should be %s" %
            (actual_curl['errors'][0]['msg'], self.jira_error_msg))

        url = "'https://maps.googleapis.com/maps/api/geocode/json'"
        options = "{'get':True,'data': 'address=santa+cruz&components=country:ES&key=AIzaSyCT6niGCMsgegJkQSYSqpoLZ4_rSO59XQQ'}"
        query = "select curl(" + url + ", %s" % options + ")"
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertTrue(
            self.google_error_msg in actual_curl['errors'][0]['msg'],
            "Error message is %s this is incorrect it should be %s" %
            (actual_curl['errors'][0]['msg'], self.google_error_msg))

    '''Test that disallowed_urls field has precedence over allowed_urls'''

    def test_disallowed_precedence(self):
        self.rest.create_whitelist(
            self.master, {
                "all_access":
                False,
                "allowed_urls":
                ["https://maps.googleapis.com/maps/api/geocode/json"],
                "disallowed_urls": ["https://maps.googleapis.com"]
            })

        url = "'https://maps.googleapis.com/maps/api/geocode/json'"
        options = "{'get':True,'data': 'address=santa+cruz&components=country:ES&key=AIzaSyCT6niGCMsgegJkQSYSqpoLZ4_rSO59XQQ'}"
        query = "select curl(" + url + ", %s" % options + ")"
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertTrue(
            self.google_error_msg in actual_curl['errors'][0]['msg'],
            "Error message is %s this is incorrect it should be %s" %
            (actual_curl['errors'][0]['msg'], self.google_error_msg))

        self.rest.create_whitelist(
            self.master, {
                "all_access":
                False,
                "allowed_urls":
                ["https://maps.googleapis.com/maps/api/geocode/json"],
                "disallowed_urls":
                ["https://maps.googleapis.com/maps/api/geocode/json"]
            })
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertTrue(
            self.google_error_msg in actual_curl['errors'][0]['msg'],
            "Error message is %s this is incorrect it should be %s" %
            (actual_curl['errors'][0]['msg'], self.google_error_msg))

    '''Test a valid allowed_urls entry combined with an invalid disallowed_urls entry'''

    def test_allowed_invalid_disallowed(self):
        self.rest.create_whitelist(
            self.master, {
                "all_access": False,
                "allowed_urls": ["https://maps.googleapis.com"],
                "disallowed_urls": ["blahblahblah"]
            })

        curl_output = self.shell.execute_command(
            "%s https://jira.atlassian.com/rest/api/latest/issue/JRA-9" %
            self.curl_path)
        expected_curl = self.convert_list_to_json(curl_output[0])
        url = "'https://jira.atlassian.com/rest/api/latest/issue/JRA-9'"
        query = "select curl(" + url + ")"
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertTrue(
            self.jira_error_msg in actual_curl['errors'][0]['msg'],
            "Error message is %s this is incorrect it should be %s" %
            (actual_curl['errors'][0]['msg'], self.jira_error_msg))

        curl_output = self.shell.execute_command(
            "%s --get https://maps.googleapis.com/maps/api/geocode/json "
            "-d 'address=santa+cruz&components=country:ES&key=AIzaSyCT6niGCMsgegJkQSYSqpoLZ4_rSO59XQQ'"
            % self.curl_path)
        expected_curl = self.convert_list_to_json(curl_output[0])
        url = "'https://maps.googleapis.com/maps/api/geocode/json'"
        options = "{'get':True,'data': 'address=santa+cruz&components=country:ES&key=AIzaSyCT6niGCMsgegJkQSYSqpoLZ4_rSO59XQQ'}"
        query = "select curl(" + url + ", %s" % options + ")"
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertEqual(actual_curl['results'][0]['$1'], expected_curl)

    '''Test a valid disallowed_urls entry combined with an invalid allowed_urls entry'''

    def test_disallowed_invalid_allowed(self):
        self.rest.create_whitelist(
            self.master, {
                "all_access": False,
                "allowed_urls": ["blahblahblah"],
                "disallowed_urls": ["https://maps.googleapis.com"]
            })
        url = "'https://maps.googleapis.com/maps/api/geocode/json'"
        options = "{'get':True,'data': 'address=santa+cruz&components=country:ES&key=AIzaSyCT6niGCMsgegJkQSYSqpoLZ4_rSO59XQQ'}"
        query = "select curl(" + url + ", %s" % options + ")"
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertTrue(
            self.google_error_msg in actual_curl['errors'][0]['msg'],
            "Error message is %s this is incorrect it should be %s" %
            (actual_curl['errors'][0]['msg'], self.google_error_msg))

        response, content = self.rest.create_whitelist(
            self.master, {
                "all_access": False,
                "allowed_urls": "blahblahblah",
                "disallowed_urls": ["https://maps.googleapis.com"]
            })
        result = json.loads(content)
        self.assertEqual(result['errors']['allowed_urls'],
                         "Must be an array of non-empty strings")
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        actual_curl = self.convert_to_json(curl)
        self.assertTrue(
            self.google_error_msg in actual_curl['errors'][0]['msg'],
            "Error message is %s this is incorrect it should be %s" %
            (actual_curl['errors'][0]['msg'], self.google_error_msg))

    def test_invalid_disallowed_url_validation(self):
        response, content = self.rest.create_whitelist(
            self.master, {
                "all_access": False,
                "disallowed_urls": "blahblahblahblahblah"
            })
        result = json.loads(content)
        self.assertEqual(result['errors']['disallowed_urls'],
                         "Must be an array of non-empty strings")

    '''Should not be able to curl localhost, even from localhost itself, unless it is whitelisted'''

    def test_localhost(self):
        self.rest.create_whitelist(self.master, {"all_access": False})
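        # Note: the expected message appears to be matched against cbq output with whitespace stripped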
        error_msg ="Errorevaluatingprojection.-cause:URLendpointisntwhitelistedhttp://localhost:8093/query/service." \
                   "PleasemakesuretowhitelisttheURLontheUI."

        n1ql_query = 'select * from default limit 5'
        query = "select curl('http://localhost:8093/query/service', {'data' : 'statement=%s'," \
                "'user':'******'})" % (n1ql_query, self.username, self.password)
        curl = self.shell.execute_commands_inside(self.cbqpath, query, '', '',
                                                  '', '', '')
        json_curl = self.convert_to_json(curl)
        self.assertTrue(
            error_msg in json_curl['errors'][0]['msg'],
            "Error message is %s this is incorrect it should be %s" %
            (json_curl['errors'][0]['msg'], error_msg))
Example #32
0
    def test_restart_node_with_full_disk(self):
        def _get_disk_usage_percentage(remote_client):
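            # Scan the df-style disk usage output for the field ending in '%' and
            # return it without the '%' sign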
            disk_info = remote_client.get_disk_info()
            percentage = disk_info[1] + disk_info[2]
            for item in percentage.split():
                if "%" in item:
                    self.log.info("disk usage {0}".format(item))
                    return item[:-1]

        remote_client = RemoteMachineShellConnection(self.master)
        output, error = remote_client.execute_command_raw("rm -rf full_disk*",
                                                          use_channel=True)
        remote_client.log_command_output(output, error)
        percentage = _get_disk_usage_percentage(remote_client)
        try:
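            # Fill the disk with 3 GB files until usage reaches ~95%, then restart couchbase-server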
            while int(percentage) < 95:
                output, error = remote_client.execute_command(
                    "dd if=/dev/zero of=full_disk{0} bs=3G count=1".format(
                        percentage + str(time.time())),
                    use_channel=True)
                remote_client.log_command_output(output, error)
                percentage = _get_disk_usage_percentage(remote_client)
            processes1 = remote_client.get_running_processes()
            output, error = remote_client.execute_command(
                "/etc/init.d/couchbase-server restart", use_channel=True)
            remote_client.log_command_output(output, error)
        finally:
            output, error = remote_client.execute_command_raw(
                "rm -rf full_disk*", use_channel=True)
            remote_client.log_command_output(output, error)
            remote_client.disconnect()
Example #33
0
    def _load_dgm(self):
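        # Keep loading documents until the active resident ratio on every node drops
        # below self.active_resident_threshold, i.e. the bucket reaches DGM
        # (disk greater than memory) state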
        generate_load = BlobGenerator('nosql',
                                      'nosql-',
                                      self.value_size,
                                      end=self.num_items)
        self._load_all_buckets(self.master,
                               generate_load,
                               "create",
                               0,
                               1,
                               0,
                               True,
                               batch_size=20000,
                               pause_secs=5,
                               timeout_secs=180)
        self.load_gen_list.append(generate_load)

        stats_all_buckets = {}
        for bucket in self.buckets:
            stats_all_buckets[bucket.name] = StatsCommon()

        for bucket in self.buckets:
            threshold_reached = False
            while not threshold_reached:
                for server in self.servers:
                    active_resident = stats_all_buckets[bucket.name].get_stats(
                        [server], bucket, '',
                        'vb_active_perc_mem_resident')[server]
                    if int(active_resident) > self.active_resident_threshold:
                        self.log.info(
                            "resident ratio %s is above the threshold %s for %s in bucket %s; continue loading the cluster"
                            % (active_resident, self.active_resident_threshold,
                               server.ip, bucket.name))
                        random_key = key_generator()
                        generate_load = BlobGenerator(random_key,
                                                      '%s-' % random_key,
                                                      self.value_size,
                                                      end=self.num_items)
                        self._load_all_buckets(self.master,
                                               generate_load,
                                               "create",
                                               0,
                                               1,
                                               0,
                                               True,
                                               batch_size=20000,
                                               pause_secs=5,
                                               timeout_secs=180)
                        self.load_gen_list.append(generate_load)
                    else:
                        threshold_reached = True
                        self.log.info(
                            "DGM state achieved for %s in bucket %s!" %
                            (server.ip, bucket.name))
                        break

        if (self.doc_ops is not None):
            if ("update" in self.doc_ops):
                for gen in self.load_gen_list[:int(
                        len(self.load_gen_list) * 0.5)]:
                    self._load_all_buckets(self.master,
                                           gen,
                                           "update",
                                           0,
                                           1,
                                           0,
                                           True,
                                           batch_size=20000,
                                           pause_secs=5,
                                           timeout_secs=180)
            if ("delete" in self.doc_ops):
                for gen in self.load_gen_list[
                        int(len(self.load_gen_list) * 0.5):]:
                    self._load_all_buckets(self.master,
                                           gen,
                                           "delete",
                                           0,
                                           1,
                                           0,
                                           True,
                                           batch_size=20000,
                                           pause_secs=5,
                                           timeout_secs=180)
            if ("expire" in self.doc_ops):
                for gen in self.load_gen_list[:int(
                        len(self.load_gen_list) * 0.8)]:
                    self._load_all_buckets(self.master,
                                           gen,
                                           "update",
                                           self.expire_time,
                                           1,
                                           0,
                                           True,
                                           batch_size=20000,
                                           pause_secs=5,
                                           timeout_secs=180)
                time.sleep(self.expire_time * 2)
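                # Set the expiry pager interval to 5 seconds so expired items are purged quickly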

                for server in self.servers:
                    shell = RemoteMachineShellConnection(server)
                    for bucket in self.buckets:
                        shell.execute_cbepctl(bucket, "", "set flush_param",
                                              "exp_pager_stime", 5)
                    shell.disconnect()
                time.sleep(30)
Example #34
0
    def test_cancel_ddl_link_disconnect(self):
        """
        Cover's the scenario: Cancel link disconnect DDL statement
        Expected Behaviour: Request sent will now either succeed or fail, or its connection will be abruptly closed

        steps:
        """
        self.log.info("Create dataset")
        self.cbas_util.create_dataset_on_bucket(self.cb_bucket_name,
                                                self.cbas_dataset_name)

        self.log.info("Connect to Local link")
        self.assertTrue(self.cbas_util.connect_link(),
                        msg="Connect link failed. Might be a product bug")

        self.log.info("Assert document count")
        self.assertTrue(self.cbas_util.validate_cbas_dataset_items_count(
            self.cbas_dataset_name, self.num_items),
                        msg="Count mismatch on CBAS")

        link_disconnected = 0
        link_not_disconnected = 0
        times = 0
        start_time = time.time()
        while time.time() < start_time + 60:
            times += 1

            self.log.info("Connect link")
            self.assertTrue(self.cbas_util.connect_link(),
                            msg="Connect link failed. Might be a product bug")

            self.log.info(
                "Pick a time window between 0 - 500ms for killing of node")
            self.kill_window = random.randint(0, 500) / 1000.0

            self.log.info("Pick the cbas node to kill java process")
            server_to_kill_java = self.analytics_servers[random.randint(0, 2)]
            shell = RemoteMachineShellConnection(server_to_kill_java)

            self.log.info("Pick the java process id to kill")
            java_process_id, _ = shell.execute_command("pgrep java")

            # Run the task Disconnect link/Sleep window/Kill Java process in parallel
            self.log.info("Disconnect Link")
            tasks = self.cbas_util.async_query_execute("disconnect link Local",
                                                       "immediate", 1)

            self.log.info("Sleep for the window time")
            self.sleep(self.kill_window)

            self.log.info("kill Java process")
            if len(java_process_id):
                shell.execute_command("kill -9 %s" % (java_process_id[0]))
            else:
                # In case of windows
                shell.kill_java()

            self.log.info("Fetch task result")
            for task in tasks:
                task.get_result()

            self.log.info(
                "Wait for request to complete and cluster to be active: Using private ping() function"
            )
            cluster_recover_start_time = time.time()
            while time.time() < cluster_recover_start_time + 180:
                try:
                    status, metrics, _, cbas_result, _ = self.cbas_util.execute_statement_on_cbas_util(
                        "set `import-private-functions` `true`;ping()")
                    if status == "success":
                        break
                except:
                    self.sleep(2, message="Wait for the service to be up again")

            self.log.info(
                "Request sent will now either succeed or fail, or its connection will be abruptly closed. Verify the state"
            )
            status, content, _ = self.cbas_util.fetch_bucket_state_on_cbas()
            self.assertTrue(status, msg="Fetch bucket state failed")
            content = json.loads(content)
            if content['buckets'][0]['state'] == "disconnected":
                link_disconnected += 1
                status, _, _, _, _ = self.cbas_util.execute_statement_on_cbas_util(
                    "select count(*) from %s" % self.cbas_dataset_name)
                self.assertTrue(status == "success", "Select query failed")
            else:
                link_not_disconnected += 1
                self.assertTrue(
                    self.cbas_util.validate_cbas_dataset_items_count(
                        self.cbas_dataset_name, self.num_items),
                    msg="Count mismatch on CBAS")

            # Break out once we have observed both outcomes (DDL cancelled and DDL completed) at least once
            if link_disconnected != link_not_disconnected and link_disconnected > 0 and link_not_disconnected > 0:
                break

        self.log.info("Test run summary")
        self.log.info("Times ran %d" % times)
        self.log.info("link Local was disconnected %d times" %
                      link_disconnected)
        self.log.info("link Local was not disconnected %d times" %
                      link_not_disconnected)
Example #35
0
    def test_cancel_ddl_index_drop(self):
        """
        Cover's the scenario: Cancel drop index DDL statement
        Expected Behaviour: Request sent will now either succeed or fail, or its connection will be abruptly closed

        steps:
        1.  Add all CBAS nodes(At least 3)
        2.  Create connection to KV bucket
        3.  Load documents in KV
        4.  Create dataset
        5.  Connect link Local
        6.  Assert on document count
        7.  Disconnect link
        8.  Drop index
        9.  Pick a time window to sleep before killing Java service
        10. Pick a CBAS node to kill
        11. Pick the Java process id on CBAS node selected in previous step
        12. Drop sec index/sleep/kill java process
        13. Wait for service to be up using ping function
        14. Check if the index was dropped
        15. Repeat steps 4 - 14 for a period of 10 minutes
        16. Make sure we see at least a few drop index statements fail
        """

        self.log.info("Create dataset")
        self.cbas_util.create_dataset_on_bucket(self.cb_bucket_name,
                                                self.cbas_dataset_name)

        sec_idx_dropped = 0
        sec_idx_not_dropped = 0
        times = 0
        start_time = time.time()
        while time.time() < start_time + 600:
            times += 1

            self.log.info("Create secondary index")
            self.cbas_util.execute_statement_on_cbas_util(
                "create index sec_idx on ds(age:int)")

            self.log.info("Connect to Local link")
            self.assertTrue(self.cbas_util.connect_link(),
                            msg="Connect link failed. Might be a product bug")

            self.log.info("Assert document count")
            self.assertTrue(self.cbas_util.validate_cbas_dataset_items_count(
                self.cbas_dataset_name, self.num_items),
                            msg="Count mismatch on CBAS")

            self.log.info("Disconnect link")
            self.assertTrue(
                self.cbas_util.disconnect_link(),
                msg="Disconnect link failed. Might be a product bug")

            self.log.info(
                "Pick a time window between 0 - 100ms for killing of node")
            self.kill_window = random.randint(0, 100) / 1000.0

            self.log.info("Pick the cbas node to kill java process")
            server_to_kill_java = self.analytics_servers[random.randint(0, 2)]
            shell = RemoteMachineShellConnection(server_to_kill_java)

            self.log.info("Pick the java process id to kill")
            java_process_id, _ = shell.execute_command("pgrep java")

            # Run the task Drop sec index/Sleep window/Kill Java process in parallel
            self.log.info("Drop index")
            tasks = self.cbas_util.async_query_execute(
                "drop index %s.%s" % (self.cbas_dataset_name, "sec_idx"),
                "immediate", 1)

            self.log.info("Sleep for the window time")
            self.sleep(self.kill_window)

            self.log.info("kill Java process")
            if len(java_process_id):
                shell.execute_command("kill -9 %s" % (java_process_id[0]))
            else:
                # In case of windows
                shell.kill_java()

            self.log.info("Fetch task result")
            for task in tasks:
                task.get_result()

            self.log.info(
                "Wait for request to complete and cluster to be active: Using private ping() function"
            )
            cluster_recover_start_time = time.time()
            while time.time() < cluster_recover_start_time + 180:
                try:
                    status, metrics, _, cbas_result, _ = self.cbas_util.execute_statement_on_cbas_util(
                        "set `import-private-functions` `true`;ping();")
                    if status == "success":
                        break
                except:
                    self.sleep(2, message="Wait for the service to be up again")

            self.log.info(
                "Request sent will now either succeed or fail, or its connection will be abruptly closed. Verify the state"
            )
            status, metrics, _, cbas_result, _ = self.cbas_util.execute_statement_on_cbas_util(
                'select value count(*) from Metadata.`Index` where IndexName = "sec_idx"'
            )
            self.assertEquals(status, "success", msg="CBAS query failed")
            self.log.info(cbas_result)
            if cbas_result[0] == 0:
                sec_idx_dropped += 1
                status, _, _, _, _ = self.cbas_util.execute_statement_on_cbas_util(
                    "select age from %s" % self.cbas_dataset_name)
                self.assertTrue(status == "success", "Select query failed")
            else:
                sec_idx_not_dropped += 1
                status, _, _, _, _ = self.cbas_util.execute_statement_on_cbas_util(
                    "select age from %s" % self.cbas_dataset_name)
                self.assertTrue(status == "success", "Select query failed")

            # Break out once we have observed both outcomes (DDL cancelled and DDL completed) at least once
            if sec_idx_dropped != sec_idx_not_dropped and sec_idx_dropped > 0 and sec_idx_not_dropped > 0:
                break

        self.log.info("Test run summary")
        self.log.info("Times ran %d" % times)
        self.log.info("Secondary index %s was dropped %d times" %
                      (self.cbas_dataset_name, sec_idx_dropped))
        self.log.info("Secondary index %s was not dropped %d times" %
                      (self.cbas_dataset_name, sec_idx_not_dropped))
Example #36
0
    def test_cancel_ddl_dataset_create(self):
        """
        Cover's the scenario: Cancel create dataset DDL statement
        Expected Behaviour: Request sent will now either succeed or fail, or its connection will be abruptly closed

        steps:
        1.  Add all CBAS nodes(At least 3)
        2.  Create connection to KV bucket
        3.  Load documents in KV
        4.  Disconnect link Local
        5.  Drop dataset if exist
        6.  Pick a time window to sleep before killing Java service
        7.  Pick a CBAS node to kill
        8.  Pick the Java process id on CBAS node selected in previous step
        9.  Create dataset/sleep/kill java process
        10. Wait for service to be up using ping function
        11. Check if dataset is created
        12. Repeat step 4 - 11 for a period of 10 minutes
        13. Make sure we see at least a few create dataset statements fail (no dataset created in step 11)
        """
        dataset_created = 0
        dataset_not_created = 0
        times = 0
        start_time = time.time()
        while time.time() < start_time + 600:
            times += 1

            self.log.info("Disconnect link")
            self.assertTrue(
                self.cbas_util.disconnect_link(),
                msg="Disconnect link failed. Might be a product bug")

            self.log.info("Drop dataset if exists")
            status, metrics, _, cbas_result, _ = self.cbas_util.execute_statement_on_cbas_util(
                "drop dataset %s if exists" % self.cbas_dataset_name)
            self.assertEquals(status, "success", msg="Drop dataset failed")
            self.log.info(cbas_result)

            self.log.info(
                "Pick a time window between 0 - 50ms for killing of node")
            self.kill_window = random.randint(0, 50) / 1000.0

            self.log.info("Pick the cbas node to kill java process")
            server_to_kill_java = self.analytics_servers[random.randint(0, 2)]
            shell = RemoteMachineShellConnection(server_to_kill_java)

            self.log.info("Pick the java process id to kill")
            java_process_id, _ = shell.execute_command("pgrep java")

            # Run the task Create dataset/Sleep window/Kill Java process in parallel
            self.log.info("Create dataset")
            tasks = self.cbas_util.async_query_execute(
                "create dataset %s on %s" %
                (self.cbas_dataset_name, self.cb_bucket_name), "immediate", 1)

            self.log.info("Sleep for the window time")
            self.sleep(self.kill_window)

            self.log.info("kill Java process")
            if len(java_process_id):
                shell.execute_command("kill -9 %s" % (java_process_id[0]))
            else:
                # In case of windows
                shell.kill_java()

            self.log.info("Fetch task result")
            for task in tasks:
                task.get_result()

            self.log.info(
                "Wait for request to complete and cluster to be active: Using private ping() function"
            )
            cluster_recover_start_time = time.time()
            while time.time() < cluster_recover_start_time + 180:
                try:
                    status, metrics, _, cbas_result, _ = self.cbas_util.execute_statement_on_cbas_util(
                        "set `import-private-functions` `true`;ping()")
                    if status == "success":
                        break
                except:
                    self.sleep(2, message="Wait for the service to be up again")

            self.log.info("Check DDL create dataset status")
            status, metrics, _, cbas_result, _ = self.cbas_util.execute_statement_on_cbas_util(
                'select value count(*) from Metadata.`Dataset` d WHERE d.DataverseName <> "Metadata" and DatasetName = "ds"'
            )
            self.assertEquals(status, "success", msg="CBAS query failed")
            self.log.info(cbas_result)
            if cbas_result[0] == 1:
                dataset_created += 1
                self.assertTrue(self.cbas_util.connect_link(),
                                msg="Connect link Failed")
                self.assertTrue(
                    self.cbas_util.validate_cbas_dataset_items_count(
                        self.cbas_dataset_name, self.num_items),
                    msg="Count mismatch on CBAS")
            else:
                dataset_not_created += 1

            # Break out once we have observed both outcomes (DDL cancelled and DDL completed) at least once
            if dataset_created != dataset_not_created and dataset_created > 0 and dataset_not_created > 0:
                break

        self.log.info("Test run summary")
        self.log.info("Times ran: %d " % times)
        self.log.info("Dataset %s was created %d times" %
                      (self.cbas_dataset_name, dataset_created))
        self.log.info("Dataset %s was not created %d times" %
                      (self.cbas_dataset_name, dataset_not_created))
Example #37
0
    def setUp(self):
        super(CWCBaseTest, self).setUp()
        self.product = self.input.param("product", "cb")
        self.vbuckets = self.input.param("vbuckets", 128)
        self.version = self.input.param("version", None)
        self.doc_ops = self.input.param("doc_ops", None)
        self.upload = self.input.param("upload", False)
        self.uploadHost = self.input.param("uploadHost", None)
        self.customer = self.input.param("customer", "")
        self.ticket = self.input.param("ticket", "")
        self.collect_nodes = self.input.param("collect_nodes", "*")
        self.cancel_collect = self.input.param("cancel_collect", False)
        self.cli_collect_nodes = self.input.param("cli_collect_nodes",
                                                  "--all-nodes")
        self.cli_cancel_collect = self.input.param("cli_cancel_collect", False)
        self.cli_upload = self.input.param("cli_upload", False)
        self.shutdown_nodes = self.input.param("shutdown_nodes", None)
        self.add_services = self.input.param("add_services", None)
        if self.add_services is not None:
            if "-" in self.add_services:
                self.add_services = self.add_services.split("-")
            else:
                self.add_services = [self.add_services]
        if self.doc_ops is not None:
            self.doc_ops = self.doc_ops.split(";")
        self.defaul_map_func = "function (doc) {\n  emit(doc._id, doc);\n}"

        #define the data that will be used to test
        self.blob_generator = self.input.param("blob_generator", True)
        server = self.servers[0]
        rest = RestConnection(server)
        if self.blob_generator:
            #gen_load data is used for upload before each test (1000 items by default)
            self.gen_load = BlobGenerator('test',
                                          'test-',
                                          self.value_size,
                                          end=self.num_items)
            #gen_update is used for mutating half of the uploaded data
            self.gen_update = BlobGenerator('test',
                                            'test-',
                                            self.value_size,
                                            end=(self.num_items // 2 - 1))
            #upload data before each test
            self._load_all_buckets(self.servers[0], self.gen_load, "create", 0)
        else:
            self._load_doc_data_all_buckets()
        shell = RemoteMachineShellConnection(self.master)
        type = shell.extract_remote_info().distribution_type
        shell.disconnect()
        self.log_path = ""
        self.bin_path = ""
        self.os_type = ""
        if type.lower() == 'windows':
            self.os_type = 'windows'
            self.log_path = WINDOWS_CW_LOG_PATH
            self.bin_path = WIN_COUCHBASE_BIN_PATH
        elif type.lower() in ["ubuntu", "centos", "red hat"]:
            self.os_type = "unix"
            self.log_path = LINUX_CW_LOG_PATH
            self.bin_path = LINUX_COUCHBASE_BIN_PATH
        elif type.lower() == "mac":
            self.os_type = 'mac'
            self.log_path = MAC_CW_LOG_PATH
            self.bin_path = MAC_COUCHBASE_BIN_PATH
Example #38
0
    def test_failover(self):
        self.setup_for_test(skip_data_loading=True)
        self.rebalance_node = self.input.param('rebalance_node','CC')
        self.how_many = self.input.param('how_many',1)
        self.restart_rebalance = self.input.param('restart_rebalance',False)
        self.replica_change = self.input.param('replica_change',0)
        self.add_back = self.input.param('add_back',False)
        
        query = "select sleep(count(*),50000) from {0};".format(self.cbas_dataset_name)
        handles = self.cbas_util._run_concurrent_queries(query,"async",10)
        self.ingestion_in_progress()
        
        if self.rebalance_node == "CC":
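            # Failing over the CC (analytics cluster controller) node, so re-point the
            # cbas util connection at one of the remaining NC nodes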
            node_in_test = [self.cbas_node]
            otpNodes = [self.otpNodes[0]]
            self.cbas_util.closeConn()
            self.cbas_util = cbas_utils(self.master, self.cbas_servers[0])
            self.cbas_util.createConn("default")
            
            self.cbas_node = self.cbas_servers[0]
        elif self.rebalance_node == "NC":
            node_in_test = self.cbas_servers[:self.how_many]
            otpNodes = self.nc_otpNodes[:self.how_many]
        else:
            node_in_test = [self.cbas_node] + self.cbas_servers[:self.how_many]
            otpNodes = self.otpNodes[:self.how_many+1]
            self.cbas_util.closeConn()
            self.cbas_util = cbas_utils(self.master, self.cbas_servers[self.how_many])
            self.cbas_util.createConn("default")
            
        replicas_before_rebalance=len(self.cbas_util.get_replicas_info(self.shell))
        items_in_cbas_bucket = 0
        start_time=time.time()
        while (items_in_cbas_bucket == 0 or items_in_cbas_bucket == -1) and time.time()<start_time+60:
            try:
                items_in_cbas_bucket, _ = self.cbas_util.get_num_items_in_cbas_dataset(self.cbas_dataset_name)
            except:
                pass
            self.sleep(1)
        self.log.info("Items before failover node: %s"%items_in_cbas_bucket)
        
        if self.restart_rebalance:
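            # Fail over the chosen node(s), start a rebalance, stop it mid-way and then restart it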
            graceful_failover = self.input.param("graceful_failover", False)
            failover_task = self._cb_cluster.async_failover(self.input.servers,
                                                            node_in_test,
                                                            graceful_failover)
            failover_task.get_result()
            if self.add_back:
                for otpnode in otpNodes:
                    self.rest.set_recovery_type('ns_1@' + otpnode.ip, "full")
                    self.rest.add_back_node('ns_1@' + otpnode.ip)
                self.rebalance(wait_for_completion=False)
            else:
                self.rebalance(ejected_nodes=[node.id for node in otpNodes], wait_for_completion=False)
            self.sleep(2)
            if self.rest._rebalance_progress_status() == "running":
                self.assertTrue(self.rest.stop_rebalance(wait_timeout=120), "Failed while stopping rebalance.")
                if self.add_back:
                    self.rebalance(wait_for_completion=False)
                else:
                    self.rebalance(ejected_nodes=[node.id for node in otpNodes], wait_for_completion=False)
            else:
                self.fail("Rebalance completed before the test could have stopped rebalance.")
        else:
            graceful_failover = self.input.param("graceful_failover", False)
            failover_task = self._cb_cluster.async_failover(self.input.servers,
                                                            node_in_test,
                                                            graceful_failover)
            failover_task.get_result()
            if self.add_back:
                for otpnode in otpNodes:
                    self.rest.set_recovery_type('ns_1@' + otpnode.ip, "full")
                    self.rest.add_back_node('ns_1@' + otpnode.ip)
            self.rebalance(wait_for_completion=False)

        replicas_before_rebalance -= self.replica_change
        self.sleep(5)
        str_time = time.time()
        while self.rest._rebalance_progress_status() == "running" and time.time()<str_time+300:
            replicas = self.cbas_util.get_replicas_info(self.shell)
            if replicas:
                for replica in replicas:
                    self.log.info("replica state during rebalance: %s"%replica['status'])
        self.sleep(15)
        replicas = self.cbas_util.get_replicas_info(self.shell)
        replicas_after_rebalance=len(replicas)
        self.assertEqual(replicas_after_rebalance, replicas_before_rebalance, "%s,%s"%(replicas_after_rebalance,replicas_before_rebalance))
        
        for replica in replicas:
            self.log.info("replica state during rebalance: %s"%replica['status'])
            self.assertEqual(replica['status'], "IN_SYNC","Replica state is incorrect: %s"%replica['status'])
                                
        items_in_cbas_bucket = 0
        start_time=time.time()
        while (items_in_cbas_bucket == 0 or items_in_cbas_bucket == -1) and time.time()<start_time+60:
            try:
                items_in_cbas_bucket, _ = self.cbas_util.get_num_items_in_cbas_dataset(self.cbas_dataset_name)
            except:
                pass
            self.sleep(1)
        self.log.info("After rebalance operation docs in CBAS bucket : %s"%items_in_cbas_bucket)
        if items_in_cbas_bucket < self.num_items*2 and items_in_cbas_bucket > self.num_items:
            self.log.info("Data ingestion was interrupted successfully")
        elif items_in_cbas_bucket < self.num_items:
            self.log.info("Data ingestion was interrupted and restarted from 0")
        else:
            self.log.info("Data ingestion was not interrupted; it completed before the rebalance operation")
            
        run_count = 0
        fail_count = 0
        success_count = 0
        aborted_count = 0
        shell=RemoteMachineShellConnection(node_in_test[0])
        for handle in handles:
            status, hand = self.cbas_util.retrieve_request_status_using_handle(node_in_test, handle, shell)
            if status == "running":
                run_count += 1
                self.log.info("query with handle %s is running."%handle)
            elif status == "failed":
                fail_count += 1
                self.log.info("query with handle %s is failed."%handle)
            elif status == "success":
                success_count += 1
                self.log.info("query with handle %s is successful."%handle)
            else:
                aborted_count +=1
                self.log.info("Queued job is deleted: %s"%status)
                
        self.log.info("After the failover/rebalance %s queued jobs are Running."%run_count)
        self.log.info("After the failover/rebalance %s queued jobs are Failed."%fail_count)
        self.log.info("After the failover/rebalance %s queued jobs are Successful."%success_count)
        self.log.info("After the failover/rebalance %s queued jobs are Aborted."%aborted_count)
        
        if self.rebalance_node == "NC":
            self.assertTrue(aborted_count==0, "Some queries aborted")
        
        query = "select count(*) from {0};".format(self.cbas_dataset_name)
        self.cbas_util._run_concurrent_queries(query,"immediate",100)
        
        if not self.cbas_util.validate_cbas_dataset_items_count(self.cbas_dataset_name,self.num_items*2):
            self.fail("No. of items in CBAS dataset does not match that in the CB bucket")
            
        self.ingest_more_data()
Example #39
0
class LogRedactionTests(CBASBaseTest, LogRedactionBase):
    def setUp(self):
        super(LogRedactionTests, self).setUp()
        self.analytics_log_files = [
            'analytics_debug.log', 'analytics_info.log',
            'analytics_access.log', 'analytics_dcpdebug.log',
            'analytics_error.log', 'analytics_shutdown.log',
            'analytics_warn.log'
        ]
        self.user_data_search_keys = [
            'Administrator', '@cbas-cbauth', '@cbas',
            'id like \\"21st_amendment_brewery_cafe%\\"',
            'select name from ds limit 1', '21st_amendment_brewery_cafe',
            'use dv1', 'SELECT name FROM ds where state=', 'Texas'
        ]
        self.system_metadata_search_keys = [
            self.cb_bucket_name, self.cbas_node.ip, self.master.ip,
            'port:9110', 'port:8095', self.index_name
        ]

        self.shell = RemoteMachineShellConnection(self.cbas_node)
        self.cbas_url = "http://{0}:{1}/analytics/service".format(
            self.cbas_node.ip, 8095)

        self.log.info('Enable partial redaction')
        self.set_redaction_level()

        self.log.info('Update log level to All for all loggers')
        self.update_log_level()

    def update_log_level(self):
        default_logger_config_dict = {
            'org.apache.asterix': 'ALL',
            'com.couchbase.client.dcp.conductor.DcpChannel': 'ALL',
            'com.couchbase.client.core.node': 'ALL',
            'com.couchbase.analytics': 'ALL',
            'org.apache.hyracks': 'ALL',
            'org.apache.hyracks.http.server.CLFLogger': 'ALL',
            '': 'ALL'
        }
        _, node_id, _ = self.cbas_util.retrieve_nodes_config()
        default_logger_config_dict[
            'org.apache.hyracks.util.trace.Tracer.Traces@' + node_id] = 'ALL'

        self.log.info('Set logging level to ALL')
        status, content, response = self.cbas_util.set_log_level_on_cbas(
            default_logger_config_dict)
        self.assertTrue(status,
                        msg='Response status incorrect for SET request')

    def generate_user_data(self):
        self.log.info('Load beer-sample bucket')
        self.load_sample_buckets(servers=[self.master],
                                 bucketName=self.cb_bucket_name,
                                 total_items=self.beer_sample_docs_count)
        self.cbas_util.createConn(self.cbas_bucket_name)

        self.log.info('Create dataset')
        self.cbas_util.create_dataset_on_bucket(self.cb_bucket_name,
                                                self.cbas_dataset_name)

        self.log.info('Create secondary index')
        self.index_field = self.input.param('index_field', 'name:string')
        create_idx_statement = 'create index {0} if not exists on {1}({2})'.format(
            self.index_name, self.cbas_dataset_name, self.index_field)
        status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
            create_idx_statement)
        self.assertTrue(status == 'success', 'Create Index query failed')

        self.log.info('Connect link')
        self.cbas_util.connect_link()

        self.log.info('Verify dataset count')
        self.cbas_util.validate_cbas_dataset_items_count(
            self.cbas_dataset_name, self.beer_sample_docs_count)

        self.log.info('Execute a parameterized query')
        self.cbas_parameterized_query = 'SELECT name FROM ds where state=$1'
        self.param = [{"args": ["Texas"]}]
        status, _, errors, cbas_result, _ = self.cbas_util.execute_parameter_statement_on_cbas_util(
            self.cbas_parameterized_query, parameters=self.param)

        self.log.info('Execute query with incorrect user credentials')
        self.cbas_correct_query = 'select name from %s limit 1' % self.cbas_dataset_name
        output, _ = self.shell.execute_command(
            'curl -X POST {0} -u {1}:{2} -d "statement=select * from ds limit 1"'
            .format(self.cbas_url, "Administrator", "pass"))

        self.log.info('Execute cbas query that is incorrect')
        self.cbas_incorrect_query = 'select meta().* from ds where meta().id like "21st_amendment_brewery_cafe%"'
        self.cbas_util.execute_statement_on_cbas_util(
            self.cbas_incorrect_query)

        self.log.info('Create custom dataverse')
        self.dataverse_name = 'dv1'
        self.cbas_util.create_dataverse_on_cbas(dataverse_name='dv1')

        self.log.info('Create dataset on dataverse')
        self.cbas_util.create_dataset_on_bucket(self.cb_bucket_name,
                                                self.cbas_dataset_name,
                                                dataverse=self.dataverse_name)

    def generate_system_and_metadata(self):
        self.log.info('Add a new analytics node and rebalance')
        self.add_node(self.cbas_servers[0], services=["cbas"], rebalance=True)

        self.log.info('Add some user data to backup')
        self.generate_user_data()

        self.log.info('Backup analytics metadata for beer-sample')
        response = self.cbas_util.backup_cbas_metadata(
            bucket_name=self.cb_bucket_name)
        self.assertEquals(response['status'],
                          'success',
                          msg='Failed to backup analytics metadata')

        self.log.info('Restore Analytics metadata for beer-sample using API')
        response = self.cbas_util.restore_cbas_metadata(
            response, bucket_name=self.cb_bucket_name)
        self.assertEquals(response['status'],
                          'success',
                          msg='Failed to restore analytics metadata')

        self.log.info('Access analytics cluster information')
        self.cbas_util.fetch_analytics_cluster_response(self.shell)

        self.log.info('Access analytics cluster configs information')
        self.cbas_util.fetch_service_parameter_configuration_on_cbas()

        self.log.info('Access analytics bucket information')
        self.cbas_util.fetch_bucket_state_on_cbas()

        self.log.info('Access analytics node diagnostics information')
        self.cbas_util.get_analytics_diagnostics(self.cbas_node)

        self.log.info('Fetch analytics stats')
        self.cbas_util.fetch_cbas_stats()

    def generate_audit_events(self):
        update_config_map = {'storageMaxActiveWritableDatasets': 8}
        status, _, _ = self.cbas_util.update_service_parameter_configuration_on_cbas(
            update_config_map)
        self.assertTrue(status, msg='Failed to update config')

    def enable_audit(self):
        audit_obj = audit(host=self.master)
        current_state = audit_obj.getAuditStatus()
        if current_state:
            audit_obj.setAuditEnable('false')
        audit_obj.setAuditEnable('true')

    def start_log_collection(self):
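        # Trigger cbcollect, wait for it to complete, and derive the redacted and
        # non-redacted archive names from the per-node collection path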
        self.start_logs_collection()
        result = self.monitor_logs_collection()
        logs_path = result['perNode']['ns_1@' + str(self.cbas_node.ip)]['path']
        redact_file_name = logs_path.split('/')[-1]
        non_redact_file_name = logs_path.split('/')[-1].replace(
            '-redacted', '')
        remote_path = logs_path[0:logs_path.rfind('/') + 1]
        return non_redact_file_name, redact_file_name, remote_path

    def verify_audit_logs_are_not_redacted(self):
        self.log.info('Enable audit')
        self.enable_audit()

        self.log.info('Generate audit events')
        self.generate_audit_events()

        self.log.info('Verify audit log file is not redacted')
        output, _ = self.check_audit_logs_are_not_redacted()
        if len(output) != 0:
            self.fail(msg='Audit logs must not be redacted')

    def verify_user_data_is_redacted(self):
        self.log.info('Generate user data')
        self.generate_user_data()

        self.log.info('Collect logs')
        non_redact_file_name, redact_file_name, remote_path = self.start_log_collection(
        )

        self.log.info('Verify redacted log file exist')
        self.verify_log_files_exist(server=self.cbas_node,
                                    remotepath=remote_path,
                                    redactFileName=redact_file_name,
                                    nonredactFileName=non_redact_file_name)

        self.log.info('Verify user data is redacted')
        for log_file in self.analytics_log_files:
            self.verify_log_redaction(
                server=self.cbas_node,
                remotepath=remote_path,
                redactFileName=redact_file_name,
                nonredactFileName=non_redact_file_name,
                logFileName='ns_server.{0}'.format(log_file))

        self.log.info(
            'Verify user data is not displayed in any analytics logs'
        )  # The user data must be redacted in logs. We have bugs logged - MB-33110/MB-33109/MB-33093
        user_data_not_redacted = False
        for search_key in self.user_data_search_keys:
            output, _ = self.check_for_user_data_in_redacted_logs(
                search_key,
                redact_file_name,
                remote_path,
                server=self.cbas_node)
            if len(output) > 0:
                user_data_not_redacted = True
                self.log.info(
                    '`{0}` occurs {1} times in the analytics logs'.format(
                        search_key, len(output)))
        if user_data_not_redacted:
            self.fail(
                msg='User data is not redacted. Refer to the logs above for non-redacted user data'
            )

    def verify_user_errors_are_not_surrounded_by_ud_tags(self):
        user_error_queries = [
            'select 10000000000000000000', 'SELECT BITAND(3,6) AS BitAND'
        ]
        self.log.info(
            'Execute queries that result in errors displayed on workbench UI')
        for query in user_error_queries:
            status, _, errors, _, _ = self.cbas_util.execute_statement_on_cbas_util(
                query)
            self.assertTrue(
                "<ud>" not in errors[0]["msg"],
                msg='User error msg must not be surrounded by ud tags')

        self.log.info('Check user data is surrounded by ud tags in logs')
        for query in user_error_queries:
            self.check_user_data_in_server_logs(query.replace("(",
                                                              "\(").replace(
                                                                  ")", "\)"),
                                                server=self.cbas_node)

    def verify_system_and_metadata_is_not_redacted(self):
        self.log.info('Generate system and metadata')
        self.generate_system_and_metadata()

        self.log.info('Collect logs')
        non_redact_file_name, redact_file_name, remote_path = self.start_log_collection(
        )

        self.log.info('Verify System/Metadata is not redacted')
        self.set_redacted_directory(server=self.cbas_node,
                                    remote_path=remote_path,
                                    redact_file_name=redact_file_name,
                                    log_file_name="couchbase.log")
        for search_key in self.system_metadata_search_keys:
            output, _ = self.check_for_user_data_in_redacted_logs(
                search_key,
                redact_file_name,
                remote_path,
                server=self.cbas_node)
            if len(output) == 0:
                self.fail(
                    msg='%s - metadata/system data must not be redacted' %
                    search_key)

    def tearDown(self):
        super(LogRedactionTests, self).tearDown()
Example #40
0
    def _reboot_cluster(self, data_set):
        try:
            for server in self.servers[0:self.helper.num_nodes_reboot]:
                shell = RemoteMachineShellConnection(server)
                if shell.extract_remote_info().type.lower() == 'windows':
                    o, r = shell.execute_command("shutdown -r -f -t 0")
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} is being stopped".format(
                        server.ip))
                elif shell.extract_remote_info().type.lower() == 'linux':
                    o, r = shell.execute_command("reboot")
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} is being stopped".format(
                        server.ip))

                    time.sleep(120)
                    shell = RemoteMachineShellConnection(server)
                    command = "/sbin/iptables -F"
                    o, r = shell.execute_command(command)
                    shell.log_command_output(o, r)
                    shell.disconnect()
                    self.log.info("Node {0} backup".format(server.ip))
        finally:
            self.log.info("Warming-up server ..".format(server.ip))
            time.sleep(100)
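
    # A hedged alternative to the fixed sleeps above (a sketch only; the
    # timeout and interval values are assumptions, not taken from the original
    # test): poll until the rebooted node accepts connections again.
    def _wait_for_node_reachable_sketch(self, server, timeout=300, interval=10):
        end_time = time.time() + timeout
        while time.time() < end_time:
            try:
                shell = RemoteMachineShellConnection(server)
                shell.disconnect()
                return True
            except Exception:
                time.sleep(interval)
        return False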
Example #41
0
    def test_cbcollect_with_redaction_enabled_with_xdcr(self):
        rest_src = RestConnection(self.master)
        rest_src.remove_all_replications()
        rest_src.remove_all_remote_clusters()

        rest_dest = RestConnection(self.servers[1])
        rest_dest_helper = RestHelper(rest_dest)

        try:
            rest_src.remove_all_replications()
            rest_src.remove_all_remote_clusters()
            self.set_redaction_level()
            rest_src.add_remote_cluster(self.servers[1].ip,
                                        self.servers[1].port,
                                        self.servers[1].rest_username,
                                        self.servers[1].rest_password, "C2")
            """ at dest cluster """
            self.add_built_in_server_user(node=self.servers[1])
            rest_dest.create_bucket(bucket='default', ramQuotaMB=512)
            bucket_ready = rest_dest_helper.vbucket_map_ready('default')
            if not bucket_ready:
                self.fail(
                    "Bucket default at dest not created after 120 seconds.")
            repl_id = rest_src.start_replication('continuous', 'default', "C2")
            if repl_id is not None:
                self.log.info("Replication created successfully")
            gen = BlobGenerator("ent-backup",
                                "ent-backup-",
                                self.value_size,
                                end=self.num_items)
            tasks = self._async_load_all_buckets(self.master, gen, "create", 0)
            for task in tasks:
                task.result()
            self.sleep(10)
            """ enable firewall """
            if self.interrupt_replication:
                RemoteUtilHelper.enable_firewall(self.master, xdcr=True)
            """ start collect logs """
            self.start_logs_collection()
            result = self.monitor_logs_collection()
            """ verify logs """
            try:
                logs_path = result["perNode"]["ns_1@" +
                                              str(self.master.ip)]["path"]
            except KeyError:
                logs_path = result["perNode"]["[email protected]"]["path"]
            redactFileName = logs_path.split('/')[-1]
            nonredactFileName = logs_path.split('/')[-1].replace(
                '-redacted', '')
            remotepath = logs_path[0:logs_path.rfind('/') + 1]
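            # Note (an assumption about the result layout): the cbcollect
            # status keys each node by its otp name, e.g. "ns_1@<ip>"; on a
            # single-node / cluster_run setup the node registers itself as
            # "ns_1@127.0.0.1", hence the KeyError fallback above.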
            self.verify_log_files_exist(remotepath=remotepath,
                                        redactFileName=redactFileName,
                                        nonredactFileName=nonredactFileName)
            self.log.info("Verify on log ns_server.goxdcr.log")
            self.verify_log_redaction(remotepath=remotepath,
                                      redactFileName=redactFileName,
                                      nonredactFileName=nonredactFileName,
                                      logFileName="ns_server.goxdcr.log")
        finally:
            """ clean up xdcr """
            if self.interrupt_replication:
                shell = RemoteMachineShellConnection(self.master)
                shell.disable_firewall()
                shell.disconnect()
            rest_dest.delete_bucket()
            rest_src.remove_all_replications()
            rest_src.remove_all_remote_clusters()
    def test_cc_swap_rebalance(self):
        self.restart_rebalance = self.input.param('restart_rebalance',False)
        
        self.setup_for_test(skip_data_loading=True)
        query = "select sleep(count(*),50000) from {0};".format(self.cbas_dataset_name)
        handles = self.cbas_util._run_concurrent_queries(query,"async",10)
        self.ingestion_in_progress()
        
        replicas_before_rebalance=len(self.cbas_util.get_replicas_info(self.shell))
        self.cbas_util.closeConn()
        self.cbas_util = cbas_utils(self.master, self.cbas_servers[0])
        self.cbas_util.createConn("default")
        self.cbas_node = self.cbas_servers[0]
        
        self.cluster_util.add_node(node=self.cbas_servers[-1],rebalance=False)
        swap_nc = self.input.param('swap_nc', False)
        if not swap_nc:
            out_nodes = [self.otpNodes[0]]
        else:
            out_nodes = [self.otpNodes[1]]    
        
        self.cluster_util.remove_node(out_nodes, wait_for_rebalance=False)
        self.sleep(5, "Wait for sometime after rebalance started.")
        if self.restart_rebalance:
            if self.rest._rebalance_progress_status() == "running":
                self.assertTrue(self.rest.stop_rebalance(wait_timeout=120), "Failed while stopping rebalance.")
                self.sleep(10)
            else:
                self.fail("Rebalance completed before the test could have stopped rebalance.")
            self.rebalance(ejected_nodes=[node.id for node in out_nodes], wait_for_completion=False)
        self.sleep(5)
        str_time = time.time()
        while self.rest._rebalance_progress_status() == "running" and time.time()<str_time+300:
            replicas = self.cbas_util.get_replicas_info(self.shell)
            if replicas:
                for replica in replicas:
                    self.log.info("replica state during rebalance: %s"%replica['status'])
        self.sleep(20)
        
        replicas = self.cbas_util.get_replicas_info(self.shell)
        replicas_after_rebalance=len(replicas)
        self.assertEqual(replicas_after_rebalance, replicas_before_rebalance, "%s,%s"%(replicas_after_rebalance,replicas_before_rebalance))
        
        for replica in replicas:
            self.log.info("replica state during rebalance: %s"%replica['status'])
            self.assertEqual(replica['status'], "IN_SYNC","Replica state is incorrect: %s"%replica['status'])
                                
#         items_in_cbas_bucket, _ = self.cbas_util.get_num_items_in_cbas_dataset(self.cbas_dataset_name)
#         self.log.info("Items before service restart: %s"%items_in_cbas_bucket)
                
        items_in_cbas_bucket = 0
        start_time=time.time()
        while (items_in_cbas_bucket == 0 or items_in_cbas_bucket == -1) and time.time()<start_time+60:
            try:
                items_in_cbas_bucket, _ = self.cbas_util.get_num_items_in_cbas_dataset(self.cbas_dataset_name)
            except:
                pass
            self.sleep(1)
        self.log.info("After rebalance operation docs in CBAS bucket : %s"%items_in_cbas_bucket)
        if items_in_cbas_bucket < self.num_items*2 and items_in_cbas_bucket > self.num_items:
            self.log.info("Data ingestion was interrupted as expected.")
        elif items_in_cbas_bucket < self.num_items:
            self.log.info("Data ingestion was interrupted and restarted from 0.")
        else:
            self.log.info("Data ingestion was not interrupted; it completed before the rebalance operation.")
            
        run_count = 0
        fail_count = 0
        success_count = 0
        aborted_count = 0
        shell=RemoteMachineShellConnection(self.master)
        for handle in handles:
            status, hand = self.cbas_util.retrieve_request_status_using_handle(self.master, handle, shell)
            if status == "running":
                run_count += 1
                self.log.info("query with handle %s is running."%handle)
            elif status == "failed":
                fail_count += 1
                self.log.info("query with handle %s is failed."%handle)
            elif status == "success":
                success_count += 1
                self.log.info("query with handle %s is successful."%handle)
            else:
                aborted_count +=1
                self.log.info("Queued job is deleted: %s"%status)
                
        self.log.info("After service restart %s queued jobs are Running."%run_count)
        self.log.info("After service restart %s queued jobs are Failed."%fail_count)
        self.log.info("After service restart %s queued jobs are Successful."%success_count)
        self.log.info("After service restart %s queued jobs are Aborted."%aborted_count)
        
        query = "select count(*) from {0};".format(self.cbas_dataset_name)
        self.cbas_util._run_concurrent_queries(query,"immediate",100)
        
        if not self.cbas_util.validate_cbas_dataset_items_count(self.cbas_dataset_name,self.num_items*2):
            self.fail("No. of items in CBAS dataset do not match that in the CB bucket")
        self.ingest_more_data()
class OpsChangeCasTests(BucketConfig):

    def setUp(self):
        super(OpsChangeCasTests, self).setUp()
        self.prefix = "test_"
        self.expire_time = self.input.param("expire_time", 35)
        self.item_flag = self.input.param("item_flag", 0)
        self.value_size = self.input.param("value_size", 256)
        self.items = self.input.param("items", 20)
        self.rest = RestConnection(self.master)
        self.client = VBucketAwareMemcached(self.rest, self.bucket)

    def tearDown(self):
        super(OpsChangeCasTests, self).tearDown()

    def check_rebalance_complete(self):
        # Despite its name, this helper simply waits until the (possibly
        # restarted) master node accepts connections again before the test
        # proceeds.
        for i in range(0, 6):
            try:
                shell = RemoteMachineShellConnection(self.master)
                shell.disconnect()
                break
            except:
                self.log.info("Unable to connect to the host. "
                              "Machine has not restarted")
                self.sleep(60, "Sleep for a couple of minutes and try "
                               "again")

    def test_meta_rebalance_out(self):
        KEY_NAME = 'key1'

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, self.bucket)

        for i in range(10):
            # set a key
            value = 'value' + str(i)
            client.memcached(KEY_NAME).set(KEY_NAME, 0, 0, json.dumps({'value':value}))
            vbucket_id = client._get_vBucket_id(KEY_NAME)
            #print 'vbucket_id is {0}'.format(vbucket_id)
            mc_active = client.memcached(KEY_NAME)
            mc_master = client.memcached_for_vbucket(vbucket_id )
            mc_replica = client.memcached_for_replica_vbucket(vbucket_id)

            cas_active = mc_active.getMeta(KEY_NAME)[4]
            #print 'cas_a {0} '.format(cas_active)

        max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(client._get_vBucket_id(KEY_NAME)) + ':max_cas'] )

        self.assertTrue(cas_active == max_cas, '[ERROR] Active CAS {0} does not match vbucket max_cas {1}'.format(cas_active, max_cas))

        # remove that node
        self.log.info('Remove the node with active data')

        rebalance = self.cluster.async_rebalance(self.servers[-1:], [], [self.master])

        rebalance.result()
        replica_CAS = mc_replica.getMeta(KEY_NAME)[4]
        get_meta_resp = mc_replica.getMeta(KEY_NAME, request_extended_meta_data=False)
        #print 'replica CAS {0}'.format(replica_CAS)
        #print 'replica ext meta {0}'.format(get_meta_resp)

        # add the node back
        self.log.info('Add the node back, the max_cas should be healed')
        rebalance = self.cluster.async_rebalance(self.servers[-1:], [self.master], [])

        rebalance.result()

        # verify the CAS is good
        client = VBucketAwareMemcached(rest, self.bucket)
        mc_active = client.memcached(KEY_NAME)
        active_CAS = mc_active.getMeta(KEY_NAME)[4]
        #print 'active cas {0}'.format(active_CAS)

        self.assertTrue(replica_CAS == active_CAS, 'cas mismatch active: {0} replica {1}'.format(active_CAS, replica_CAS))
        # not supported in 4.6 self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set')

    def test_meta_failover(self):
        KEY_NAME = 'key2'

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, self.bucket)

        for i in range(10):
            # set a key
            value = 'value' + str(i)
            client.memcached(KEY_NAME).set(KEY_NAME, 0, 0, json.dumps({'value':value}))
            vbucket_id = client._get_vBucket_id(KEY_NAME)
            #print 'vbucket_id is {0}'.format(vbucket_id)
            mc_active = client.memcached(KEY_NAME)
            mc_master = client.memcached_for_vbucket( vbucket_id )
            mc_replica = client.memcached_for_replica_vbucket(vbucket_id)

            cas_active = mc_active.getMeta(KEY_NAME)[4]
            #print 'cas_a {0} '.format(cas_active)

        max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(client._get_vBucket_id(KEY_NAME)) + ':max_cas'] )

        self.assertTrue(cas_active == max_cas, '[ERROR] Active CAS {0} does not match vbucket max_cas {1}'.format(cas_active, max_cas))

        # failover that node
        self.log.info('Failing over node with active data {0}'.format(self.master))
        self.cluster.failover(self.servers, [self.master])

        self.log.info('Remove the node with active data {0}'.format(self.master))

        rebalance = self.cluster.async_rebalance(self.servers[:], [], [self.master])

        rebalance.result()
        self.check_rebalance_complete()

        replica_CAS = mc_replica.getMeta(KEY_NAME)[4]
        #print 'replica CAS {0}'.format(replica_CAS)

        # add the node back
        self.log.info('Add the node back, the max_cas should be healed')
        rebalance = self.cluster.async_rebalance(self.servers[-1:], [self.master], [])

        rebalance.result()

        # verify the CAS is good
        client = VBucketAwareMemcached(rest, self.bucket)
        mc_active = client.memcached(KEY_NAME)
        active_CAS = mc_active.getMeta(KEY_NAME)[4]
        #print 'active cas {0}'.format(active_CAS)

        get_meta_resp = mc_active.getMeta(KEY_NAME, request_extended_meta_data=False)
        #print 'replica CAS {0}'.format(replica_CAS)
        #print 'replica ext meta {0}'.format(get_meta_resp)

        self.assertTrue(replica_CAS == active_CAS, 'cas mismatch active: {0} replica {1}'.format(active_CAS, replica_CAS))
        self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set')

    def test_meta_soft_restart(self):
        KEY_NAME = 'key2'

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, self.bucket)

        for i in range(10):
            # set a key
            value = 'value' + str(i)
            client.memcached(KEY_NAME).set(KEY_NAME, 0, 0, json.dumps({'value':value}))
            vbucket_id = client._get_vBucket_id(KEY_NAME)
            #print 'vbucket_id is {0}'.format(vbucket_id)
            mc_active = client.memcached(KEY_NAME)
            mc_master = client.memcached_for_vbucket( vbucket_id )
            mc_replica = client.memcached_for_replica_vbucket(vbucket_id)

            cas_pre = mc_active.getMeta(KEY_NAME)[4]
            #print 'cas_a {0} '.format(cas_pre)

        max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(client._get_vBucket_id(KEY_NAME)) + ':max_cas'] )

        self.assertTrue(cas_pre == max_cas, '[ERROR] Pre-restart CAS {0} does not match vbucket max_cas {1}'.format(cas_pre, max_cas))

        # restart nodes
        self._restart_server(self.servers[:])

        # verify the CAS is good
        client = VBucketAwareMemcached(rest, self.bucket)
        mc_active = client.memcached(KEY_NAME)
        cas_post = mc_active.getMeta(KEY_NAME)[4]
        #print 'post cas {0}'.format(cas_post)

        get_meta_resp = mc_active.getMeta(KEY_NAME, request_extended_meta_data=False)
        #print 'post CAS {0}'.format(cas_post)
        #print 'post ext meta {0}'.format(get_meta_resp)

        self.assertTrue(cas_pre == cas_post, 'cas mismatch pre-restart: {0} post-restart: {1}'.format(cas_pre, cas_post))
        # extended meta is not supported self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set')

    def test_meta_hard_restart(self):
        KEY_NAME = 'key2'

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, self.bucket)

        for i in range(10):
            # set a key
            value = 'value' + str(i)
            client.memcached(KEY_NAME).set(KEY_NAME, 0, 0, json.dumps({'value':value}))
            vbucket_id = client._get_vBucket_id(KEY_NAME)
            #print 'vbucket_id is {0}'.format(vbucket_id)
            mc_active = client.memcached(KEY_NAME)
            mc_master = client.memcached_for_vbucket( vbucket_id )
            mc_replica = client.memcached_for_replica_vbucket(vbucket_id)

            cas_pre = mc_active.getMeta(KEY_NAME)[4]
            #print 'cas_a {0} '.format(cas_pre)

        max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(client._get_vBucket_id(KEY_NAME)) + ':max_cas'] )

        self.assertTrue(cas_pre == max_cas, '[ERROR] Pre-reboot CAS {0} does not match vbucket max_cas {1}'.format(cas_pre, max_cas))

        # reboot nodes
        self._reboot_server()

        self.check_rebalance_complete()
        # verify the CAS is good
        client = VBucketAwareMemcached(rest, self.bucket)
        mc_active = client.memcached(KEY_NAME)
        cas_post = mc_active.getMeta(KEY_NAME)[4]
        #print 'post cas {0}'.format(cas_post)

        get_meta_resp = mc_active.getMeta(KEY_NAME, request_extended_meta_data=False)
        #print 'post CAS {0}'.format(cas_post)
        #print 'post ext meta {0}'.format(get_meta_resp)

        self.assertTrue(cas_pre == cas_post, 'cas mismatch pre-reboot: {0} post-reboot: {1}'.format(cas_pre, cas_post))
        # extended meta is not supported self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set')
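
    # The tests below repeatedly read the vbucket high-water CAS from the
    # "vbucket-details" stat group. A small illustrative helper (a sketch only;
    # the tests themselves keep this lookup inline):
    def _get_vb_max_cas_sketch(self, client, key):
        mc = client.memcached(key)
        vb_id = client._get_vBucket_id(key)
        return int(mc.stats('vbucket-details')['vb_%s:max_cas' % vb_id])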

    ''' Test Incremental sets on cas and max cas values for keys
    '''
    def test_cas_set(self):
        self.log.info(' Starting test-sets')
        self._load_ops(ops='set', mutations=20)
        time.sleep(60)
        self._check_cas(check_conflict_resolution=False)

    ''' Test Incremental updates on cas and max cas values for keys
    '''
    def test_cas_updates(self):
        self.log.info(' Starting test-updates')
        self._load_ops(ops='set', mutations=20)
        #self._load_ops(ops='add')
        self._load_ops(ops='replace', mutations=20)
        #self._load_ops(ops='delete')
        self._check_cas(check_conflict_resolution=False)

    ''' Test Incremental deletes on cas and max cas values for keys
    '''
    def test_cas_deletes(self):
        self.log.info(' Starting test-deletes')
        self._load_ops(ops='set', mutations=20)
        #self._load_ops(ops='add')
        self._load_ops(ops='replace', mutations=20)
        self._load_ops(ops='delete')
        self._check_cas(check_conflict_resolution=False)

    ''' Test expiry on cas and max cas values for keys
    '''
    def test_cas_expiry(self):
        self.log.info(' Starting test-expiry')
        self._load_ops(ops='set', mutations=20)
        #self._load_ops(ops='add')
        #self._load_ops(ops='replace',mutations=20)
        self._load_ops(ops='expiry')
        self._check_cas(check_conflict_resolution=False)
        self._check_expiry()

    ''' Test touch on cas and max cas values for keys
    '''
    def test_cas_touch(self):
        self.log.info(' Starting test-touch')
        self._load_ops(ops='set', mutations=20)
        #self._load_ops(ops='add')
        #self._load_ops(ops='replace',mutations=20)
        self._load_ops(ops='touch')
        self._check_cas(check_conflict_resolution=False)

    ''' Test getMeta on cas and max cas values for keys
    '''
    def test_cas_getMeta(self):
        self.log.info(' Starting test-getMeta')
        self._load_ops(ops='set', mutations=20)
        self._check_cas(check_conflict_resolution=False)
        #self._load_ops(ops='add')
        self._load_ops(ops='replace', mutations=20)
        self._check_cas(check_conflict_resolution=False)

        self._load_ops(ops='delete')
        self._check_cas(check_conflict_resolution=False)



    def test_cas_setMeta_lower(self):

        self.log.info(' Starting test-getMeta')


        # set some kv
        self._load_ops(ops='set', mutations=1)
        #self._check_cas(check_conflict_resolution=False)

        k=0
        while k<10:

            key = "{0}{1}".format(self.prefix, k)
            k += 1

            vbucket_id = self.client._get_vBucket_id(key)
            self.log.info('For key {0} the vbucket is {1}'.format( key, vbucket_id ))
            #print 'vbucket_id is {0}'.format(vbucket_id)
            mc_active = self.client.memcached(key)
            mc_master = self.client.memcached_for_vbucket( vbucket_id )
            #mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

            TEST_SEQNO = 123
            TEST_CAS = k

            rc = mc_active.getMeta(key)
            cas = rc[4] + 1

            self.log.info('Key {0} retrieved CAS is {1} and will set CAS to {2}'.format(key, rc[4], cas))
            rev_seqno = rc[3]



            # do a set meta based on the existing CAS
            set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, 123, cas)


            # check what get meta say
            rc = mc_active.getMeta(key)
            cas_post_meta = rc[4]
            self.log.info('Getmeta CAS is {0}'.format(cas_post_meta))
            self.assertTrue( cas_post_meta == cas, 'Meta expected {0} actual {1}'.format( cas, cas_post_meta))

            # and what stats says
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.log.info('Max CAS for key {0} vbucket is {1}'.format( key, max_cas))
            self.assertTrue(cas_post_meta >= max_cas, '[ERROR]Max cas  is not higher it is lower than {0}'.format(cas_post_meta))
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to original cas {0}'.format(cas))


            # do another mutation and compare
            mc_active.set(key, 0, 0, json.dumps({'value':'value3'}))
            cas = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to cas {0}'.format(cas))

            # and then mix in a set with meta
            set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, 225, max_cas+1)
            cas_post_meta = mc_active.getMeta(key)[4]


            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(cas_post_meta == max_cas, '[ERROR]Max cas  is not higher it is lower than {0}'.format(cas_post_meta))


            # and one more mutation for good measure
            mc_active.set(key, 0, 0, json.dumps({'value':'value3'}))
            cas = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to cas {0}'.format(cas))

    def test_cas_setMeta_higher(self):

        self.log.info(' Starting test-getMeta')
        self._load_ops(ops='set', mutations=20)
        self._check_cas(check_conflict_resolution=False)

        k=0

        while k<10:

            key = "{0}{1}".format(self.prefix, k)
            k += 1

            vbucket_id = self.client._get_vBucket_id(key)
            #print 'vbucket_id is {0}'.format(vbucket_id)
            mc_active = self.client.memcached(key)
            mc_master = self.client.memcached_for_vbucket( vbucket_id )
            #mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)
            get_meta_1 = mc_active.getMeta(key, request_extended_meta_data=False)
            #print 'cr {0}'.format(get_meta_1)
            #print '-'*100
            TEST_SEQNO = 123
            TEST_CAS = 9966180844186042368

            cas = mc_active.getMeta(key)[4]
            #set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, TEST_SEQNO, TEST_CAS, '123456789',vbucket_id,
             #   add_extended_meta_data=True, conflict_resolution_mode=1)
            set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, TEST_SEQNO, TEST_CAS)

            cas_post_meta = mc_active.getMeta(key)[4]
            get_meta_2 = mc_active.getMeta(key, request_extended_meta_data=False)
            #print 'cr2 {0}'.format(get_meta_2)

            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(cas_post_meta == max_cas, '[ERROR]Max cas  is not equal it is {0}'.format(cas_post_meta))
            self.assertTrue(max_cas > cas, '[ERROR]Max cas  is not higher than original cas {0}'.format(cas))

            mc_active.set(key, 0, 0, json.dumps({'value':'value3'}))
            cas = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to cas {0}'.format(cas))

            set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, TEST_SEQNO, max_cas+1)

            #set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, 125, TEST_CAS+1, '223456789',vbucket_id,
            #    add_extended_meta_data=True, conflict_resolution_mode=1)
            cas_post_meta = mc_active.getMeta(key)[4]
            get_meta_3 = mc_active.getMeta(key, request_extended_meta_data=False)
            #print 'cr3 {0}'.format(get_meta_3)

            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )

            self.assertTrue(cas_post_meta == max_cas, '[ERROR]Max cas  is not lower it is higher than {0}'.format(cas_post_meta))
            #self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to original cas {0}'.format(cas))

            mc_active.set(key, 0, 0, json.dumps({'value':'value3'}))
            cas = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to cas {0}'.format(cas))


    ''' Test deleteMeta on cas and max cas values for keys
    '''
    def test_cas_deleteMeta(self):

        self.log.info(' Starting test-deleteMeta')


        # load 20 kvs and check the CAS
        self._load_ops(ops='set', mutations=20)
        time.sleep(60)
        self._check_cas(check_conflict_resolution=False)

        k=0
        test_cas = 456

        while k<1:

            key = "{0}{1}".format(self.prefix, k)
            k += 1

            vbucket_id = self.client._get_vBucket_id(key)
            mc_active = self.client.memcached(key)
            mc_master = self.client.memcached_for_vbucket( vbucket_id )
            mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

            TEST_SEQNO = 123
            test_cas = test_cas + 1


            # get the meta data
            cas = mc_active.getMeta(key)[4] + 1

            set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, TEST_SEQNO, cas)



            cas_post_meta = mc_active.getMeta(key)[4]

            # verify the observed CAS is as set

            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )

            self.assertTrue(max_cas == cas, '[ERROR]Max cas {0} is not equal to original cas {1}'.format(max_cas, cas))


            mc_active.set(key, 0, 0, json.dumps({'value':'value3'}))
            cas = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to cas {0}'.format(cas))

            # what is test cas for? Commenting out for now
            """
            set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, TEST_SEQNO, test_cas)
            cas_post_meta = mc_active.getMeta(key)[4]

            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(cas_post_meta < max_cas, '[ERROR]Max cas  is not higher it is lower than {0}'.format(cas_post_meta))
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to original cas {0}'.format(cas))
            """

            # test the delete

            mc_active.set(key, 0, 0, json.dumps({'value':'value3'}))
            cas = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(max_cas == cas, '[ERROR]Max cas  is not equal to cas {0}'.format(cas))


            #
            self.log.info('Doing delete with meta, using a lower CAS value')
            get_meta_pre = mc_active.getMeta(key)[4]
            del_with_meta_resp = mc_active.del_with_meta(key, 0, 0, TEST_SEQNO, test_cas, test_cas+1)
            get_meta_post = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.assertTrue(max_cas > test_cas+1, '[ERROR]Max cas {0} is not greater than delete cas {1}'.format(max_cas, test_cas+1))




    ''' Testing skipping conflict resolution, whereby the last write wins, and it does neither cas CR nor rev id CR
    '''
    def test_cas_skip_conflict_resolution(self):

        self.log.info(' Starting test_cas_skip_conflict_resolution ..')
        self._load_ops(ops='set', mutations=20)
        self._check_cas(check_conflict_resolution=False)

        k=0

        #Check for first 20 keys
        while k<20:

            key = "{0}{1}".format(self.prefix, k)
            k += 1

            vbucket_id = self.client._get_vBucket_id(key)
            mc_active = self.client.memcached(key)
            mc_master = self.client.memcached_for_vbucket( vbucket_id )
            mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

            low_seq=12

            cas = mc_active.getMeta(key)[4]
            pre_seq = mc_active.getMeta(key)[3]
            all = mc_active.getMeta(key)
            self.log.info('all meta data before set_meta_force {0}'.format(all))

            self.log.info('Forcing conflict_resolution to allow insertion of lower Seq Number')
            lower_cas = int(cas)-1
            #import pdb;pdb.set_trace()
            #set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, low_seq, lower_cas, '123456789',vbucket_id)
            set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, low_seq, lower_cas, 3)
            cas_post_meta = mc_active.getMeta(key)[4]
            all_post_meta = mc_active.getMeta(key)
            post_seq = mc_active.getMeta(key)[3]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.log.info('Expect No conflict_resolution to occur, and the last updated mutation to be the winner..')

            #print 'cas meta data after set_meta_force {0}'.format(cas_post_meta)
            #print 'all meta data after set_meta_force {0}'.format(all_post_meta)
            self.log.info('all meta data after set_meta_force {0}'.format(all_post_meta))

            self.assertTrue(max_cas == cas, '[ERROR]Max cas {0} is not equal to original cas {1}'.format(max_cas, cas))
            self.assertTrue(pre_seq > post_seq, '[ERROR]Pre rev id {0} is not greater than post rev id {1}'.format(pre_seq, post_seq))

    ''' Testing revid based conflict resolution with timeSync enabled, where cas on either mutations match and it does rev id CR
        '''
    def test_revid_conflict_resolution(self):

        self.log.info(' Starting test_cas_revid_conflict_resolution ..')
        self._load_ops(ops='set', mutations=20)
        self._check_cas(check_conflict_resolution=False)

        k=0

        #Check for first 20 keys
        while k<20:

            key = "{0}{1}".format(self.prefix, k)
            k += 1

            vbucket_id = self.client._get_vBucket_id(key)
            mc_active = self.client.memcached(key)
            mc_master = self.client.memcached_for_vbucket( vbucket_id )
            mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

            new_seq=121

            cas = mc_active.getMeta(key)[4]
            pre_seq = mc_active.getMeta(key)[3]
            all = mc_active.getMeta(key)
            self.log.info('all meta data before set_meta_force {0}'.format(all))

            self.log.info('Forcing conflict_resolution to rev-id by matching inserting cas ')
            set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, new_seq, cas, '123456789', vbucket_id,
                                add_extended_meta_data=True, conflict_resolution_mode=1)
            cas_post_meta = mc_active.getMeta(key)[4]
            all_post_meta = mc_active.getMeta(key)
            post_seq = mc_active.getMeta(key)[3]
            get_meta_2 = mc_active.getMeta(key, request_extended_meta_data=False)
            #print 'cr2 {0}'.format(get_meta_2)
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.log.info('Expect No conflict_resolution to occur, and the last updated mutation to be the winner..')
            self.log.info('all meta data after set_meta_force {0}'.format(all_post_meta))

            self.assertTrue(max_cas == cas, '[ERROR]Max cas {0} is not equal to original cas {1}'.format(max_cas, cas))
            self.assertTrue(pre_seq < post_seq, '[ERROR]Pre rev id {0} is not less than post rev id {1}'.format(pre_seq, post_seq))



    ''' Testing conflict resolution, where timeSync is enabled and cas is lower but higher revid, expect Higher Cas to Win
        '''
    def test_cas_conflict_resolution(self):

        self.log.info(' Starting test_cas_conflict_resolution ..')
        self._load_ops(ops='set', mutations=20)
        self._check_cas(check_conflict_resolution=False)

        k=0

        #Check for first 20 keys
        while k<20:

            key = "{0}{1}".format(self.prefix, k)
            k += 1

            vbucket_id = self.client._get_vBucket_id(key)
            mc_active = self.client.memcached(key)
            mc_master = self.client.memcached_for_vbucket( vbucket_id )
            mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)

            new_seq=121

            cas = mc_active.getMeta(key)[4]
            pre_seq = mc_active.getMeta(key)[3]
            all = mc_active.getMeta(key)
            self.log.info('all meta data before set_meta_force {0}'.format(all))

            lower_cas = int(cas)-100
            self.log.info('Forcing lower rev-id to win with higher CAS value, instead of higher rev-id with Lower Cas ')
            #set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, new_seq, lower_cas, '123456789',vbucket_id)
            try:
                set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, new_seq, lower_cas)
            except mc_bin_client.MemcachedError as e:
                # this is expected
                pass

            cas_post_meta = mc_active.getMeta(key)[4]
            all_post_meta = mc_active.getMeta(key)
            post_seq = mc_active.getMeta(key)[3]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            self.log.info('Expect CAS conflict_resolution to occur, and the first mutation to be the winner..')

            self.log.info('all meta data after set_meta_force {0}'.format(all_post_meta))
            self.assertTrue(max_cas == cas, '[ERROR]Max cas {0} is not equal to original cas {1}'.format(max_cas, cas))
            #self.assertTrue(pre_seq < post_seq, '[ERROR]Pre rev id {0} is not greater than post rev id {1}'.format(pre_seq, post_seq))

    ''' Testing revid based conflict resolution with timeSync enabled, where cas on either mutations match and it does rev id CR
        and retains it after a restart server'''
    def test_restart_revid_conflict_resolution(self):

        self.log.info(' Starting test_restart_revid_conflict_resolution ..')
        self._load_ops(ops='set', mutations=20)

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, self.bucket)

        k=0

        key = "{0}{1}".format(self.prefix, k)

        vbucket_id = self.client._get_vBucket_id(key)
        mc_active = self.client.memcached(key)
        mc_master = self.client.memcached_for_vbucket( vbucket_id )
        mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id)


        # set a key
        value = 'value0'
        client.memcached(key).set(key, 0, 0, json.dumps({'value':value}))
        vbucket_id = client._get_vBucket_id(key)
        #print 'vbucket_id is {0}'.format(vbucket_id)
        mc_active = client.memcached(key)
        mc_master = client.memcached_for_vbucket( vbucket_id )
        mc_replica = client.memcached_for_replica_vbucket(vbucket_id)

        new_seq=121

        pre_cas = mc_active.getMeta(key)[4]
        pre_seq = mc_active.getMeta(key)[3]
        pre_max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
        all = mc_active.getMeta(key)
        get_meta_1 = mc_active.getMeta(key, request_extended_meta_data=False)
        #print 'cr {0}'.format(get_meta_1)
        self.log.info('all meta data before set_meta_force {0}'.format(all))
        self.log.info('max_cas before set_meta_force {0}'.format(pre_max_cas))

        self.log.info('Forcing conflict_resolution to rev-id by matching inserting cas ')
        try:
            set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, new_seq, pre_cas, '123456789', vbucket_id)
        except mc_bin_client.MemcachedError as e:
            # this is expected
            pass
        cas_post = mc_active.getMeta(key)[4]
        all_post_meta = mc_active.getMeta(key)
        post_seq = mc_active.getMeta(key)[3]
        get_meta_2 = mc_active.getMeta(key, request_extended_meta_data=False)
        #print 'cr {0}'.format(get_meta_2)
        max_cas_post = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
        self.log.info('Expect RevId conflict_resolution to occur, and the last updated mutation to be the winner..')
        self.log.info('all meta data after set_meta_force {0}'.format(all_post_meta))

        #self.assertTrue(max_cas_post == pre_cas, '[ERROR]Max cas {0} is not equal to original cas {1}'.format(max_cas_post, pre_cas))
        #self.assertTrue(pre_seq < post_seq, '[ERROR]Pre rev id {0} is not greater than post rev id {1}'.format(pre_seq, post_seq))


        # Restart Nodes
        self._restart_server(self.servers[:])

        # verify the CAS is good
        client = VBucketAwareMemcached(rest, self.bucket)
        mc_active = client.memcached(key)
        cas_restart = mc_active.getMeta(key)[4]
        #print 'post cas {0}'.format(cas_post)

        get_meta_resp = mc_active.getMeta(key, request_extended_meta_data=False)
        #print 'post CAS {0}'.format(cas_post)
        #print 'post ext meta {0}'.format(get_meta_resp)

        self.assertTrue(cas_post == cas_restart, 'cas mismatch before restart: {0} after restart: {1}'.format(cas_post, cas_restart))
        #self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set')

    ''' Testing revid based conflict resolution with timeSync enabled, where cas on either mutations match and it does rev id CR
        and retains it after a rebalance server'''
    def test_rebalance_revid_conflict_resolution(self):

        self.log.info(' Starting test_rebalance_revid_conflict_resolution ..')
        self._load_ops(ops='set', mutations=20)
        key = 'key1'

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, self.bucket)

        value = 'value'
        client.memcached(key).set(key, 0, 0, json.dumps({'value':value}))
        vbucket_id = client._get_vBucket_id(key)
        #print 'vbucket_id is {0}'.format(vbucket_id)
        mc_active = client.memcached(key)
        mc_master = client.memcached_for_vbucket( vbucket_id )
        mc_replica = client.memcached_for_replica_vbucket(vbucket_id)

        new_seq=121

        pre_cas = mc_active.getMeta(key)[4]
        pre_seq = mc_active.getMeta(key)[3]
        pre_max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
        all = mc_active.getMeta(key)
        get_meta_1 = mc_active.getMeta(key, request_extended_meta_data=False)
        #print 'cr {0}'.format(get_meta_1)
        self.log.info('all meta data before set_meta_force {0}'.format(all))
        self.log.info('max_cas before set_meta_force {0}'.format(pre_max_cas))

        self.log.info('Forcing conflict_resolution to rev-id by matching inserting cas ')
        #set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, new_seq, pre_cas, '123456789',vbucket_id)
        set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, new_seq, pre_cas)

        cas_post = mc_active.getMeta(key)[4]
        all_post_meta = mc_active.getMeta(key)
        post_seq = mc_active.getMeta(key)[3]
        get_meta_2 = mc_active.getMeta(key, request_extended_meta_data=False)
        #print 'cr {0}'.format(get_meta_2)
        max_cas_post = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
        self.log.info('Expect RevId conflict_resolution to occur, and the last updated mutation to be the winner..')
        self.log.info('all meta data after set_meta_force {0}'.format(all_post_meta))

        self.assertTrue(max_cas_post == pre_cas, '[ERROR]Max cas {0} is not equal to original cas {1}'.format(max_cas_post, pre_cas))
        self.assertTrue(pre_seq < post_seq, '[ERROR]Pre rev id {0} is not less than post rev id {1}'.format(pre_seq, post_seq))

        # remove that node
        self.log.info('Remove the node with active data')

        rebalance = self.cluster.async_rebalance(self.servers[-1:], [], [self.master])
        rebalance.result()
        self.check_rebalance_complete()
        replica_CAS = mc_replica.getMeta(key)[4]
        get_meta_resp = mc_replica.getMeta(key, request_extended_meta_data=False)
        #print 'replica CAS {0}'.format(replica_CAS)
        #print 'replica ext meta {0}'.format(get_meta_resp)

        # add the node back
        self.log.info('Add the node back, the max_cas should be healed')
        rebalance = self.cluster.async_rebalance(self.servers[-1:], [self.master], [])

        rebalance.result()

        # verify the CAS is good
        client = VBucketAwareMemcached(rest, self.bucket)
        mc_active = client.memcached(key)
        active_CAS = mc_active.getMeta(key)[4]
        print('active cas {0}'.format(active_CAS))

        self.assertTrue(replica_CAS == active_CAS, 'cas mismatch active: {0} replica {1}'.format(active_CAS, replica_CAS))
        #self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set')

    ''' Testing revid based conflict resolution with timeSync enabled, where cas on either mutations match and it does rev id CR
        and retains it after a failover server'''
    def test_failover_revid_conflict_resolution(self):

        self.log.info(' Starting test_rebalance_revid_conflict_resolution ..')
        self._load_ops(ops='set', mutations=20)
        key = 'key1'

        rest = RestConnection(self.master)
        client = VBucketAwareMemcached(rest, self.bucket)

        value = 'value'
        client.memcached(key).set(key, 0, 0, json.dumps({'value':value}))
        vbucket_id = client._get_vBucket_id(key)
        #print 'vbucket_id is {0}'.format(vbucket_id)
        mc_active = client.memcached(key)
        mc_master = client.memcached_for_vbucket( vbucket_id )
        mc_replica = client.memcached_for_replica_vbucket(vbucket_id)

        new_seq=121

        pre_cas = mc_active.getMeta(key)[4]
        pre_seq = mc_active.getMeta(key)[3]
        pre_max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
        all = mc_active.getMeta(key)
        get_meta_1 = mc_active.getMeta(key, request_extended_meta_data=False)
        #print 'cr {0}'.format(get_meta_1)
        self.log.info('all meta data before set_meta_force {0}'.format(all))
        self.log.info('max_cas before set_meta_force {0}'.format(pre_max_cas))

        self.log.info('Forcing conflict_resolution to rev-id by matching inserting cas ')
        #set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, new_seq, pre_cas, '123456789',vbucket_id)
        set_with_meta_resp = mc_active.setWithMeta(key, '123456789', 0, 0, new_seq, pre_cas)
        cas_post = mc_active.getMeta(key)[4]
        all_post_meta = mc_active.getMeta(key)
        post_seq = mc_active.getMeta(key)[3]
        get_meta_2 = mc_active.getMeta(key, request_extended_meta_data=False)
        #print 'cr {0}'.format(get_meta_2)
        max_cas_post = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
        self.log.info('Expect RevId conflict_resolution to occur, and the last updated mutation to be the winner..')
        self.log.info('all meta data after set_meta_force {0}'.format(all_post_meta))

        self.assertTrue(max_cas_post == pre_cas, '[ERROR]Max cas {0} is not equal to original cas {1}'.format(max_cas_post, pre_cas))
        self.assertTrue(pre_seq < post_seq, '[ERROR]Pre rev id {0} is not less than post rev id {1}'.format(pre_seq, post_seq))

        # failover that node
        self.log.info('Failing over node with active data {0}'.format(self.master))
        self.cluster.failover(self.servers, [self.master])

        self.log.info('Remove the node with active data {0}'.format(self.master))

        rebalance = self.cluster.async_rebalance(self.servers[:], [], [self.master])

        rebalance.result()
        self.check_rebalance_complete()
        replica_CAS = mc_replica.getMeta(key)[4]
        get_meta_resp = mc_replica.getMeta(key, request_extended_meta_data=False)
        #print 'replica CAS {0}'.format(replica_CAS)
        #print 'replica ext meta {0}'.format(get_meta_resp)

        # add the node back
        self.log.info('Add the node back, the max_cas should be healed')
        rebalance = self.cluster.async_rebalance(self.servers[-1:], [self.master], [])

        rebalance.result()

        # verify the CAS is good
        client = VBucketAwareMemcached(rest, self.bucket)
        mc_active = client.memcached(key)
        active_CAS = mc_active.getMeta(key)[4]
        #print 'active cas {0}'.format(active_CAS)

        self.assertTrue(replica_CAS == active_CAS, 'cas mismatch active: {0} replica {1}'.format(active_CAS, replica_CAS))
        #self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set')

    ''' Test getMeta on cas and max cas values for empty vbucket
    '''
    def test_cas_getMeta_empty_vBucket(self):
        self.log.info(' Starting test-getMeta')
        self._load_ops(ops='set', mutations=20)

        k=0
        all_keys = []
        while k<10:
            k+=1
            key = "{0}{1}".format(self.prefix, k)
            all_keys.append(key)

        vbucket_ids = self.client._get_vBucket_ids(all_keys)

        print('vbucket_ids')
        for v in vbucket_ids:
            print(v)

        print('done')

        # Note: the block below nominally tries to pick an unused vbucket id,
        # but the value is immediately overwritten and the lookup against a
        # non-existing vbucket is commented out further down, so the check
        # actually runs against an existing vbucket's max_cas.
        i=1111
        if i not in vbucket_ids and i <= 1023:
            vb_non_existing=i
        elif i>1023:
            i +=1
        else:
            self.log.info('ERROR generating empty vbucket id')

        vb_non_existing=vbucket_ids.pop()
        print('nominated vb_nonexisting is {0}'.format(vb_non_existing))
        mc_active = self.client.memcached(all_keys[0]) #Taking a temp connection to the mc.
        #max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(vb_non_existing) + ':max_cas'] )
        max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(all_keys[0])) + ':max_cas'] )
        self.assertTrue( max_cas != 0, msg='[ERROR] Max cas is zero')


    ''' Test addMeta on cas and max cas values for keys
    '''
    def test_meta_backup(self):
        self.log.info(' Starting test-getMeta')
        self._load_ops(ops='set', mutations=20)

        '''Do the backup on the bucket '''
        self.shell = RemoteMachineShellConnection(self.master)
        self.buckets = RestConnection(self.master).get_buckets()
        self.couchbase_login_info = "%s:%s" % (self.input.membase_settings.rest_username,
                                               self.input.membase_settings.rest_password)
        self.backup_location = "/tmp/backup"
        self.command_options = self.input.param("command_options", '')
        try:
            shell = RemoteMachineShellConnection(self.master)
            self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, self.command_options)

            time.sleep(5)
            shell.restore_backupFile(self.couchbase_login_info, self.backup_location, [bucket.name for bucket in self.buckets])
            print('Done with restore')
        finally:
            self._check_cas(check_conflict_resolution=False)

    ''' Common function to verify the expected values on cas
    '''
    def _check_cas(self, check_conflict_resolution=False, master=None, bucket=None, time_sync=None):
        self.log.info(' Verifying cas and max cas for the keys')
        #select_count = 20 #Verifying top 20 keys
        if master:
            self.rest = RestConnection(master)
            self.client = VBucketAwareMemcached(self.rest, bucket)

        k=0

        while k < self.items:
            key = "{0}{1}".format(self.prefix, k)
            k += 1
            mc_active = self.client.memcached(key)

            cas = mc_active.getMeta(key)[4]
            max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] )
            #print 'max_cas is {0}'.format(max_cas)
            self.assertTrue(cas == max_cas, '[ERROR] CAS {0} does not match vbucket max_cas {1}'.format(cas, max_cas))

            if check_conflict_resolution:
                get_meta_resp = mc_active.getMeta(key, request_extended_meta_data=False)
                if time_sync == 'enabledWithoutDrift':
                    self.assertTrue( get_meta_resp[5] == 1, msg='[ERROR] Metadata indicate conflict resolution is not set')
                elif time_sync == 'disabled':
                    self.assertTrue( get_meta_resp[5] == 0, msg='[ERROR] Metadata indicate conflict resolution is set')

    ''' Common function to add set delete etc operations on the bucket
    '''
    def _load_ops(self, ops=None, mutations=1, master=None, bucket=None):

        if master:
            self.rest = RestConnection(master)
        if bucket:
            self.client = VBucketAwareMemcached(self.rest, bucket)

        k=0
        payload = MemcachedClientHelper.create_value('*', self.value_size)

        while k < self.items:
            key = "{0}{1}".format(self.prefix, k)
            k += 1
            for i in range(mutations):
                if ops=='set':
                    #print 'set'
                    self.client.memcached(key).set(key, 0, 0, payload)
                elif ops=='add':
                    #print 'add'
                    self.client.memcached(key).add(key, 0, 0, payload)
                elif ops=='replace':
                    self.client.memcached(key).replace(key, 0, 0, payload)
                    #print 'Replace'
                elif ops=='delete':
                    #print 'delete'
                    self.client.memcached(key).delete(key)
                elif ops=='expiry':
                    #print 'expiry'
                    self.client.memcached(key).set(key, self.expire_time, 0, payload)
                elif ops=='touch':
                    #print 'touch'
                    self.client.memcached(key).touch(key, 10)

        self.log.info("Done with specified {0} ops".format(ops))

    '''Check if items are expired as expected'''
    def _check_expiry(self):
        time.sleep(self.expire_time+30)

        k=0
        while k<10:
            key = "{0}{1}".format(self.prefix, k)
            k += 1
            mc_active = self.client.memcached(key)
            cas = mc_active.getMeta(key)[4]
            self.log.info("Try to mutate an expired item with its previous cas {0}".format(cas))
            try:
                all = mc_active.getMeta(key)
                a=self.client.memcached(key).get(key)
                self.client.memcached(key).cas(key, 0, self.item_flag, cas, 'new')
                all = mc_active.getMeta(key)

                raise Exception("The item should already be expired. We can't mutate it anymore")
            except MemcachedError as error:
                # A MemcachedError is expected here because the key has expired.
                if error.status == ERR_NOT_FOUND:
                    self.log.info("<MemcachedError #%d ``%s''>" % (error.status, error.msg))
                    pass
                else:
                    raise Exception(error)
Example #44
0
    def test_n1ql_through_rest_with_redaction_enabled(self):
        gen_create = BlobGenerator('logredac',
                                   'logredac-',
                                   self.value_size,
                                   end=self.num_items)
        self._load_all_buckets(self.master, gen_create, "create", 0)
        shell = RemoteMachineShellConnection(self.master)
        os_type = shell.extract_remote_info().distribution_type
        curl_path = "curl"
        if os_type.lower() == 'windows':
            curl_path = "%scurl" % self.path

        shell.execute_command(
            "%s -u Administrator:password http://%s:%s/query/service -d 'statement=create primary index on default'"
            % (curl_path, self.master.ip, self.n1ql_port))

        shell.execute_command(
            "%s -u Administrator:password http://%s:%s/query/service -d 'statement=create index idx on default(fake)'"
            % (curl_path, self.master.ip, self.n1ql_port))

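        # This request presumably uses malformed credentials on purpose, to generate
        # an authentication failure whose details should appear redacted in the logs.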
        shell.execute_command(
            "%s -u Administr:pasword http://%s:%s/query/service -d 'statement=select * from default'"
            % (curl_path, self.master.ip, self.n1ql_port))

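        # Credentials embedded directly in the URL, likely to check that user info
        # inside request URLs gets redacted as well.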
        shell.execute_command(
            "%s http://Administrator:password@%s:%s/query/service -d 'statement=select * from default'"
            % (curl_path, self.master.ip, self.n1ql_port))

        shell.execute_command(
            "%s -u Administrator:password http://%s:%s/query/service -d 'statement=select * from default'"
            % (curl_path, self.master.ip, self.n1ql_port))

        # Get the CAS mismatch error by double inserting a document, second one will throw desired error
        shell.execute_command(
            "%s -u Administrator:password http://%s:%s/query/service -d 'statement=insert into default (KEY,VALUE) VALUES(\"test\",{\"field1\":\"test\"})'"
            % (curl_path, self.master.ip, self.n1ql_port))

        shell.execute_command(
            "%s -u Administrator:password http://%s:%s/query/service -d 'statement=insert into default (KEY,VALUE) VALUES(\"test\",{\"field1\":\"test\"})'"
            % (curl_path, self.master.ip, self.n1ql_port))

        # Delete a document that does not exist
        shell.execute_command(
            "%s -u Administrator:password http://%s:%s/query/service -d 'statement=DELETE FROM default USE KEYS \"fakekey\"})'"
            % (curl_path, self.master.ip, self.n1ql_port))

        #set log redaction level, collect logs, verify log files exist and verify them for redaction
        self.set_redaction_level()
        self.start_logs_collection()
        result = self.monitor_logs_collection()
        try:
            logs_path = result["perNode"]["ns_1@" +
                                          str(self.master.ip)]["path"]
        except KeyError:
            logs_path = result["perNode"]["[email protected]"]["path"]
        redactFileName = logs_path.split('/')[-1]
        nonredactFileName = logs_path.split('/')[-1].replace('-redacted', '')
        remotepath = logs_path[0:logs_path.rfind('/') + 1]
        self.verify_log_files_exist(remotepath=remotepath,
                                    redactFileName=redactFileName,
                                    nonredactFileName=nonredactFileName)
        self.verify_log_redaction(remotepath=remotepath,
                                  redactFileName=redactFileName,
                                  nonredactFileName=nonredactFileName,
                                  logFileName="ns_server.query.log")
        shell.disconnect()
    def test_arrkey_size_distribution(self):
        index_node = self.get_nodes_from_services_map(service_type="index",
                                                      get_all_nodes=False)
        rest = RestConnection(index_node)
        doc = {"indexer.statsPersistenceInterval": 60}
        rest.set_index_settings_internal(doc)

        string_70 = "x" * 70
        string_260 = "x" * 260
        string_1030 = "x" * 1030
        string_5000 = "x" * 5000
        string_103000 = "x" * 103000

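        # The five strings produce array elements that each fall into a different
        # arrkey size-distribution bucket: (65-256), (257-1024), (1025-4096),
        # (4097-102400) and (102401-max); the short entries land in (0-64).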
        insert_query1 = 'INSERT INTO default (KEY, VALUE) VALUES ("id1", { "name" : ["%s","",null] })' % string_70
        insert_query2 = 'INSERT INTO default (KEY, VALUE) VALUES ("id2", { "name" : ["%s"] })' % string_260
        insert_query3 = 'INSERT INTO default (KEY, VALUE) VALUES ("id3", { "name" : ["%s"] })' % string_1030
        insert_query4 = 'INSERT INTO default (KEY, VALUE) VALUES ("id4", { "name" : ["%s","string1"] })' % string_5000
        insert_query5 = 'INSERT INTO default (KEY, VALUE) VALUES ("id5", { "name" : ["%s", "string2"] })' % string_103000

        self.n1ql_helper.run_cbq_query(query=insert_query1,
                                       server=self.n1ql_node)
        self.n1ql_helper.run_cbq_query(query=insert_query2,
                                       server=self.n1ql_node)
        self.n1ql_helper.run_cbq_query(query=insert_query3,
                                       server=self.n1ql_node)
        self.n1ql_helper.run_cbq_query(query=insert_query4,
                                       server=self.n1ql_node)
        self.n1ql_helper.run_cbq_query(query=insert_query5,
                                       server=self.n1ql_node)

        insert_query1 = 'INSERT INTO standard_bucket0 (KEY, VALUE) VALUES ("id4", { "name" : ["%s"] })' % string_5000
        self.n1ql_helper.run_cbq_query(query=insert_query1,
                                       server=self.n1ql_node)

        create_index_query1 = "CREATE INDEX idx ON default(distinct name) USING GSI"
        create_index_query2 = "CREATE INDEX idx2 ON default(join_mo) USING GSI"
        create_index_query3 = "CREATE INDEX idx ON standard_bucket0(distinct name) USING GSI"
        create_index_query4 = "CREATE INDEX idx2 ON standard_bucket0(join_mo) USING GSI"

        try:
            self.n1ql_helper.run_cbq_query(query=create_index_query1,
                                           server=self.n1ql_node)
            self.n1ql_helper.run_cbq_query(query=create_index_query2,
                                           server=self.n1ql_node)
            self.n1ql_helper.run_cbq_query(query=create_index_query3,
                                           server=self.n1ql_node)
            self.n1ql_helper.run_cbq_query(query=create_index_query4,
                                           server=self.n1ql_node)
        except Exception as ex:
            self.log.info(str(ex))
            self.fail(
                "index creation failed with error : {0}".format(
                    str(ex)))

        expected_distr = "{u'(0-64)': 2016, u'(257-1024)': 1, u'(65-256)': 1, u'(4097-102400)': 1, u'(1025-4096)': 1, u'(102401-max)': 1}"
        expected_distr2 = "{u'(0-64)': 2016, u'(257-1024)': 0, u'(65-256)': 0, u'(4097-102400)': 1, u'(1025-4096)': 0, u'(102401-max)': 0}"

        index_map = self.get_index_stats()
        self.log.info(index_map)
        self.verify_arrkey_size(index_map, 'default', expected_distr)
        self.verify_arrkey_size(index_map, 'standard_bucket0', expected_distr2)

        self.sleep(60)

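        # Kill the indexer process; with statsPersistenceInterval set to 60s above and
        # the 60s sleep just completed, the size-distribution stats should have been
        # persisted and are expected to survive the restart.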
        shell = RemoteMachineShellConnection(index_node)
        output1, error1 = shell.execute_command("killall -9 indexer")

        self.sleep(30)

        index_map = self.get_index_stats()
        self.log.info(index_map)
        self.verify_arrkey_size(index_map, 'default', expected_distr)
        self.verify_arrkey_size(index_map, 'standard_bucket0', expected_distr2)
Example #46
0
    def test_gsi_with_index_restart_redaction_enabled(self):
        # load bucket and do some ops
        self.set_indexer_logLevel("trace")
        self.set_projector_logLevel("trace")
        json_generator = JsonGenerator()
        gen_docs = json_generator.generate_all_type_documents_for_gsi(
            docs_per_day=self.doc_per_day, start=0)
        full_docs_list = self.generate_full_docs_list(gen_docs)
        n1ql_helper = N1QLHelper(use_rest=True,
                                 buckets=self.buckets,
                                 full_docs_list=full_docs_list,
                                 log=log,
                                 input=self.input,
                                 master=self.master)
        self.load(gen_docs)
        n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
        n1ql_helper.create_primary_index(using_gsi=True, server=n1ql_node)
        query_definition_generator = SQLDefinitionGenerator()
        query_definitions = query_definition_generator.generate_airlines_data_query_definitions(
        )
        query_definitions = query_definition_generator.filter_by_group(
            ["simple"], query_definitions)
        # set log redaction level, collect logs, verify log files exist and verify them for redaction
        self.set_redaction_level()
        self.start_logs_collection()
        # Create partial Index
        for query_definition in query_definitions:
            for bucket in self.buckets:
                create_query = query_definition.generate_index_create_query(
                    bucket.name)
                n1ql_helper.run_cbq_query(query=create_query, server=n1ql_node)

        for query_definition in query_definitions:
            for bucket in self.buckets:
                scan_query = query_definition.generate_query(
                    bucket=bucket.name)
                n1ql_helper.run_cbq_query(query=scan_query, server=n1ql_node)

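        # Restart the indexer node so that restart/recovery messages land in
        # indexer.log, which is checked for redaction further below.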
        index_node = self.get_nodes_from_services_map(service_type="index")
        remote = RemoteMachineShellConnection(index_node)
        remote.stop_server()
        self.sleep(30)
        remote.start_server()
        self.sleep(30)
        for query_definition in query_definitions:
            for bucket in self.buckets:
                scan_query = query_definition.generate_query(
                    bucket=bucket.name)
                n1ql_helper.run_cbq_query(query=scan_query, server=n1ql_node)

        for query_definition in query_definitions:
            for bucket in self.buckets:
                drop_query = query_definition.generate_index_drop_query(
                    bucket=bucket.name)
                n1ql_helper.run_cbq_query(query=drop_query, server=n1ql_node)
        result = self.monitor_logs_collection()
        log.info(result)
        try:
            logs_path = result["perNode"]["ns_1@" +
                                          str(self.master.ip)]["path"]
        except KeyError:
            logs_path = result["perNode"]["[email protected]"]["path"]
        redactFileName = logs_path.split('/')[-1]
        nonredactFileName = logs_path.split('/')[-1].replace('-redacted', '')
        remotepath = logs_path[0:logs_path.rfind('/') + 1]
        log_file = self.input.param("log_file_name", "indexer.log")
        self.verify_log_files_exist(remotepath=remotepath,
                                    redactFileName=redactFileName,
                                    nonredactFileName=nonredactFileName)
        self.verify_log_redaction(remotepath=remotepath,
                                  redactFileName=redactFileName,
                                  nonredactFileName=nonredactFileName,
                                  logFileName="ns_server.{0}".format(log_file))
 def perform_warm_up(self):
     warmup_nodes = self.servers[-self.warmup_nodes:]
     for warmup_node in warmup_nodes:
         shell = RemoteMachineShellConnection(warmup_node)
         shell.stop_couchbase()
         shell.disconnect()
     self.sleep(20)
     for warmup_node in warmup_nodes:
         shell = RemoteMachineShellConnection(warmup_node)
         shell.start_couchbase()
         shell.disconnect()
     ClusterOperationHelper.wait_for_ns_servers_or_assert(
         warmup_nodes, self)
Example #48
0
    def test_flush_bucket_during_mutations(self):
        """
        Performs flush tests while data_loading is running in background
        Supported params:
        collection_mutations - If True, performs scope/collection create/drop
                               ops during bucket flush
        doc_mutations - If True, performs document CRUDs during bucket flush
        Note: Both params cannot be False at the same time for a valid test
        :return:
        """
        collection_mutations = self.input.param("collection_mutations", True)
        doc_mutation = self.input.param("doc_mutation", True)

        node_dict = dict()
        kv_nodes = self.cluster_util.get_kv_nodes()
        for node in kv_nodes:
            node_dict[node] = dict()
            node_dict[node]["shell"] = RemoteMachineShellConnection(node)
            node_dict[node]["cbstat"] = Cbstats(node_dict[node]["shell"])
            node_dict[node]["scope_stats"] = dict()
            node_dict[node]["collection_stats"] = dict()

            # Fetch scope/collection stats before flush for validation
            node_dict[node]["scope_stats"]["pre_flush"] = \
                node_dict[node]["cbstat"].get_scopes(self.bucket)
            node_dict[node]["collection_stats"]["pre_flush"] = \
                node_dict[node]["cbstat"].get_collections(self.bucket)

        doc_ttl, durability_level = \
            self.__get_random_doc_ttl_and_durability_level()
        mutate_spec = self.__get_mutate_spec(doc_ttl, durability_level)

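        # Spec used when only document CRUDs (and no scope/collection changes)
        # should run while the bucket is being flushed.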
        doc_mutation_spec = {
            MetaCrudParams.COLLECTIONS_TO_FLUSH: 0,
            MetaCrudParams.COLLECTIONS_TO_DROP: 0,
            MetaCrudParams.SCOPES_TO_DROP: 0,
            MetaCrudParams.SCOPES_TO_ADD_PER_BUCKET: 0,
            MetaCrudParams.COLLECTIONS_TO_ADD_FOR_NEW_SCOPES: 0,
            MetaCrudParams.COLLECTIONS_TO_ADD_PER_BUCKET: 0,

            "doc_crud": {
                MetaCrudParams.DocCrud.COMMON_DOC_KEY: "test_collections",
                MetaCrudParams.DocCrud.CREATE_PERCENTAGE_PER_COLLECTION: 20,
                MetaCrudParams.DocCrud.READ_PERCENTAGE_PER_COLLECTION: 20,
                MetaCrudParams.DocCrud.UPDATE_PERCENTAGE_PER_COLLECTION: 20,
                MetaCrudParams.DocCrud.DELETE_PERCENTAGE_PER_COLLECTION: 20,
            },
            # Doc_loading task options
            MetaCrudParams.DOC_TTL: doc_ttl,
            MetaCrudParams.DURABILITY_LEVEL: durability_level,

            MetaCrudParams.RETRY_EXCEPTIONS: [],
            MetaCrudParams.IGNORE_EXCEPTIONS: [],

            MetaCrudParams.COLLECTIONS_CONSIDERED_FOR_CRUD: "all",
            MetaCrudParams.SCOPES_CONSIDERED_FOR_CRUD: "all",
            MetaCrudParams.BUCKETS_CONSIDERED_FOR_CRUD: "all"
        }
        if not doc_mutation:
            del mutate_spec["doc_crud"]
            mutate_spec[MetaCrudParams.COLLECTIONS_CONSIDERED_FOR_CRUD] = 0
        elif not collection_mutations:
            mutate_spec = doc_mutation_spec

        # To avoid printing error table during doc_loading since failures
        # are expected due to documents getting flushed
        mutate_spec[MetaCrudParams.SKIP_READ_ON_ERROR] = True

        self.log.info("Running mutations with doc_ttl: %s, durability: %s"
                      % (doc_ttl, durability_level))
        mutate_task = \
            self.bucket_util.run_scenario_from_spec(
                self.task,
                self.cluster,
                self.cluster.buckets,
                mutate_spec,
                mutation_num=0,
                async_load=True,
                validate_task=False,
                batch_size=self.batch_size)

        self.sleep(5, "Wait for mutation task to start")

        self.log.info("Flushing bucket: %s" % self.bucket.name)
        self.bucket_util.update_bucket_property(self.cluster.master,
                                                self.bucket,
                                                flush_enabled=1)
        self.bucket_util.flush_bucket(self.cluster, self.bucket)

        # Wait for mutation task to complete
        self.task_manager.get_task_result(mutate_task)

        if not collection_mutations:
            # Validate only the scope/collection hierarchy
            for node in kv_nodes:
                # Fetch scope/collection stats after flush for validation
                node_dict[node]["scope_stats"]["post_flush"] = \
                    node_dict[node]["cbstat"].get_scopes(self.bucket)
                node_dict[node]["collection_stats"]["post_flush"] = \
                    node_dict[node]["cbstat"].get_collections(self.bucket)

                # Validate pre and post flush stats
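                # Flush deletes documents but should not change the collection
                # manifest, so manifest_uid must be identical before and after.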
                if node_dict[node]["scope_stats"]["pre_flush"]["manifest_uid"] \
                        != node_dict[node]["scope_stats"]["post_flush"]["manifest_uid"]:
                    self.log_failure("%s - Scope stats mismatch after flush")
                if node_dict[node]["collection_stats"]["pre_flush"]["manifest_uid"] \
                        != node_dict[node]["collection_stats"]["post_flush"]["manifest_uid"]:
                    self.log_failure("%s - Collection stats mismatch after flush")

                # Close node's shell connections
                node_dict[node]["shell"].disconnect()

        # Fails test case in case of any detected failure
        self.validate_test_failure()
Example #49
0
 def _start_replication(self, server, bucket):
     shell = RemoteMachineShellConnection(server)
     shell.execute_cbepctl(self.bucket, "start", "", "", 0)
     shell.execute_cbepctl(self.bucket, "", "set tap_param",
                           "tap_throttle_queue_cap", 1000000)
     shell.disconnect()
Example #50
0
    def setUp(self):
        super(QueryTests, self).setUp()
        self.expiry = self.input.param("expiry", 0)
        self.batch_size = self.input.param("batch_size", 1)
        self.scan_consistency = self.input.param("scan_consistency",
                                                 "request_plus")
        self.skip_cleanup = self.input.param("skip_cleanup", False)
        self.run_async = self.input.param("run_async", True)
        self.version = self.input.param("cbq_version", "git_repo")
        for server in self.servers:
            rest = RestConnection(server)
            temp = rest.cluster_status()
            self.log.info("Initial status of {0} cluster is {1}".format(
                server.ip, temp['nodes'][0]['status']))
            while (temp['nodes'][0]['status'] == 'warmup'):
                self.log.info("Waiting for cluster to become healthy")
                self.sleep(5)
                temp = rest.cluster_status()
            self.log.info("current status of {0}  is {1}".format(
                server.ip, temp['nodes'][0]['status']))

        indexer_node = self.get_nodes_from_services_map(service_type="index",
                                                        get_all_nodes=True)
        # Set indexer storage mode
        indexer_rest = RestConnection(indexer_node[0])
        doc = {"indexer.settings.storage_mode": self.gsi_type}
        indexer_rest.set_index_settings_internal(doc)
        doc = {"indexer.api.enableTestServer": True}
        indexer_rest.set_index_settings_internal(doc)
        self.indexer_scanTimeout = self.input.param("indexer_scanTimeout",
                                                    None)
        if self.indexer_scanTimeout is not None:
            for server in indexer_node:
                rest = RestConnection(server)
                rest.set_index_settings({
                    "indexer.settings.scan_timeout":
                    self.indexer_scanTimeout
                })
        if self.input.tuq_client and "client" in self.input.tuq_client:
            self.shell = RemoteMachineShellConnection(
                self.input.tuq_client["client"])
        else:
            self.shell = RemoteMachineShellConnection(self.master)
        self.use_gsi_for_primary = self.input.param("use_gsi_for_primary",
                                                    True)
        self.use_gsi_for_secondary = self.input.param("use_gsi_for_secondary",
                                                      True)
        self.create_primary_index = self.input.param("create_primary_index",
                                                     True)
        self.use_rest = self.input.param("use_rest", True)
        self.max_verify = self.input.param("max_verify", None)
        self.buckets = RestConnection(self.master).get_buckets()
        self.docs_per_day = self.input.param("doc-per-day", 49)
        self.item_flag = self.input.param("item_flag", 4042322160)
        self.n1ql_port = self.input.param("n1ql_port", 8093)
        self.dataset = self.input.param("dataset", "default")
        self.value_size = self.input.param("value_size", 1024)
        self.doc_ops = self.input.param("doc_ops", False)
        self.create_ops_per = self.input.param("create_ops_per", 0)
        self.expiry_ops_per = self.input.param("expiry_ops_per", 0)
        self.delete_ops_per = self.input.param("delete_ops_per", 0)
        self.update_ops_per = self.input.param("update_ops_per", 0)
        self.gens_load = self.generate_docs(self.docs_per_day)
        if self.input.param("gomaxprocs", None):
            self.n1ql_helper.configure_gomaxprocs()
        self.full_docs_list = self.generate_full_docs_list(self.gens_load)
        self.gen_results = TuqGenerators(self.log, self.full_docs_list)
        verify_data = False
        if self.scan_consistency != "request_plus":
            verify_data = True
        self.load(self.gens_load,
                  flag=self.item_flag,
                  verify_data=verify_data,
                  batch_size=self.batch_size)
        if self.doc_ops:
            self.ops_dist_map = self.calculate_data_change_distribution(
                create_per=self.create_ops_per,
                update_per=self.update_ops_per,
                delete_per=self.delete_ops_per,
                expiry_per=self.expiry_ops_per,
                start=0,
                end=self.docs_per_day)
            self.log.info(self.ops_dist_map)
            self.docs_gen_map = self.generate_ops_docs(self.docs_per_day, 0)
            self.full_docs_list_after_ops = self.generate_full_docs_list_after_ops(
                self.docs_gen_map)
        # Define Helper Method which will be used for running n1ql queries, create index, drop index
        self.n1ql_helper = N1QLHelper(version=self.version,
                                      shell=self.shell,
                                      use_rest=self.use_rest,
                                      max_verify=self.max_verify,
                                      buckets=self.buckets,
                                      item_flag=self.item_flag,
                                      n1ql_port=self.n1ql_port,
                                      full_docs_list=self.full_docs_list,
                                      log=self.log,
                                      input=self.input,
                                      master=self.master)
        self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
        self.log.info(self.n1ql_node)
        #self.n1ql_helper._start_command_line_query(self.n1ql_node)
        # sleep to avoid race condition during bootstrap
        if self.create_primary_index:
            try:
                self.n1ql_helper.create_primary_index(
                    using_gsi=self.use_gsi_for_primary, server=self.n1ql_node)
            except Exception as ex:
                self.log.info(ex)
                raise ex
Example #51
0
 def _kill_nodes(self, nodes, servers, bucket_name):
     self.reboot = self.input.param("reboot", False)
     if not self.reboot:
         for node in nodes:
             _node = {
                 "ip": node.ip,
                 "port": node.port,
                 "username": self.servers[0].rest_username,
                 "password": self.servers[0].rest_password
             }
             node_rest = RestConnection(_node)
             _mc = MemcachedClientHelper.direct_client(_node, bucket_name)
             self.log.info("restarted the node %s:%s" %
                           (node.ip, node.port))
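             # Read the memcached pid from stats and kill it via ns_server diag/eval,
             # simulating a memcached crash that ns_server should recover from.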
             pid = _mc.stats()["pid"]
             command = "os:cmd(\"kill -9 {0} \")".format(pid)
             self.log.info(command)
             killed = node_rest.diag_eval(command)
             self.log.info("killed ??  {0} ".format(killed))
             _mc.close()
     else:
         for server in servers:
             shell = RemoteMachineShellConnection(server)
             command = "reboot"
             output, error = shell.execute_command(command)
             shell.log_command_output(output, error)
             shell.disconnect()
             time.sleep(self.wait_timeout * 8)
             shell = RemoteMachineShellConnection(server)
             command = "/sbin/iptables -F"
             output, error = shell.execute_command(command)
             command = "iptables -F"
             output, error = shell.execute_command(command)
             shell.log_command_output(output, error)
             shell.disconnect()
Example #52
0
 def _start_server(self, server):
     shell = RemoteMachineShellConnection(server)
     shell.start_server()
     shell.disconnect()
Example #53
0
 def kill_cbft_process(server):
     NodeHelper._log.info("Killing cbft on server: {0}".format(server))
     shell = RemoteMachineShellConnection(server)
     shell.kill_cbft_process()
     shell.disconnect()
Example #54
0
    def reboot_server_new(server, test_case, wait_timeout=120):
        """Reboot a server and wait for couchbase server to run.
        @param server: server object, which needs to be rebooted.
        @param test_case: test case object, since it has assert() function
                        which is used by wait_for_ns_servers_or_assert
                        to throw assertion.
        @param wait_timeout: timeout to whole reboot operation.
        """
        # self.log.info("Rebooting server '{0}'....".format(server.ip))
        shell = RemoteMachineShellConnection(server)
        shell.info = shell.extract_remote_info()

        if shell.info.type.lower() == OS.WINDOWS:
            o, r = shell.execute_command("{0} -r -f -t 0".format(
                COMMAND.SHUTDOWN))
        elif shell.info.type.lower() == OS.LINUX:
            o, r = shell.execute_command(COMMAND.REBOOT)
        shell.log_command_output(o, r)
        # wait for restart and warmup on all server
        if shell.info.type.lower() == OS.WINDOWS:
            time.sleep(wait_timeout * 5)
        else:
            time.sleep(wait_timeout / 6)
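        # Poll for up to ~400 seconds until the node answers shell commands again,
        # turning its firewall off as soon as it responds.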
        end_time = time.time() + 400
        while time.time() < end_time:
            try:
                if shell.info.type.lower() == "windows":
                    o, r = shell.execute_command(
                        'netsh advfirewall set publicprofile state off')
                    shell.log_command_output(o, r)
                    o, r = shell.execute_command(
                        'netsh advfirewall set privateprofile state off')
                    shell.log_command_output(o, r)
                else:
                    # disable firewall on these nodes
                    o, r = shell.execute_command("iptables -F")
                    shell.log_command_output(o, r)
                    o, r = shell.execute_command("/sbin/iptables --list")
                    shell.log_command_output(o, r)
                if not o:
                    raise Exception("Node not reachable yet")
                break
            except:
                print "Node not reachable yet, will try after 10 secs"
                time.sleep(10)

        o, r = shell.execute_command("iptables -F")
        # wait till server is ready after warmup
        ClusterOperationHelper.wait_for_ns_servers_or_assert(
            [server], test_case, wait_if_warmup=True)
Example #55
0
 def stop_couchbase(server):
     """Warmp up server
     """
     shell = RemoteMachineShellConnection(server)
     shell.stop_couchbase()
     shell.disconnect()
Example #56
0
    def disable_firewall(server):
        """Disable firewall to put restriction to replicate items in XDCR.
        @param server: server object to disable firewall
        @param rep_direction: replication direction unidirection/bidirection
        """
        shell = RemoteMachineShellConnection(server)
        shell.info = shell.extract_remote_info()

        if shell.info.type.lower() == "windows":
            output, error = shell.execute_command(
                'netsh advfirewall set publicprofile state off')
            shell.log_command_output(output, error)
            output, error = shell.execute_command(
                'netsh advfirewall set privateprofile state off')
            shell.log_command_output(output, error)
            # for details see RemoteUtilHelper.enable_firewall for windows
            output, error = shell.execute_command(
                'netsh advfirewall firewall delete rule name="block erl.exe in"'
            )
            shell.log_command_output(output, error)
            output, error = shell.execute_command(
                'netsh advfirewall firewall delete rule name="block erl.exe out"'
            )
            shell.log_command_output(output, error)
        else:
            o, r = shell.execute_command("/sbin/iptables --list")
            shell.log_command_output(o, r)
            if not o:
                raise ("Node not reachable yet")


            # o, r = shell.execute_command(
            #     "/sbin/iptables -A INPUT -p tcp -i eth0 --dport 1000:65535 -j ACCEPT")
            # shell.log_command_output(o, r)
            # o, r = shell.execute_command(
            #     "/sbin/iptables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT")
            # shell.log_command_output(o, r)
            # self.log.info("enabled firewall on {0}".format(server))
            o, r = shell.execute_command("iptables -F")
            shell.log_command_output(o, r)
        shell.disconnect()
    def test_retry_connections_on_errors_before_restart(self):
        """
        CBQE-3373: Do not restart pipeline as soon as connection errors are
        detected, backoff and retry 5 times before trying to restart pipeline.
        """
        passed = False
        # start data load after setting up xdcr
        load_tasks = self.setup_xdcr_async_load()
        goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0])\
                     + '/goxdcr.log*'

        # block port 11210 on target to simulate a connection error
        shell = RemoteMachineShellConnection(self.dest_master)
        out, err = shell.execute_command(
            "/sbin/iptables -A INPUT -p tcp --dport"
            " 11210 -j DROP")
        shell.log_command_output(out, err)
        out, err = shell.execute_command("/sbin/iptables -L")
        shell.log_command_output(out, err)

        # complete loading
        for task in load_tasks:
            task.result()

        # wait for goxdcr to detect i/o timeout and try repairing
        self.sleep(self._wait_timeout * 5)

        # unblock port 11210 so replication can continue
        out, err = shell.execute_command(
            "/sbin/iptables -D INPUT -p tcp --dport"
            " 11210 -j DROP")
        shell.log_command_output(out, err)
        out, err = shell.execute_command("/sbin/iptables -L")
        shell.log_command_output(out, err)
        shell.disconnect()

        # check logs for traces of retry attempts
        for node in self.src_cluster.get_nodes():
            count1 = NodeHelper.check_goxdcr_log(
                node, "Failed to repair connections to target cluster",
                goxdcr_log)
            count2 = NodeHelper.check_goxdcr_log(
                node, "Failed to set up connections to target cluster",
                goxdcr_log)
            count = count1 + count2
            if count > 0:
                self.log.info('SUCCESS: We tried to repair connections before'
                              ' restarting pipeline')
                passed = True

        if not passed:
            self.fail(
                "No attempts were made to repair connections on %s before"
                " restarting pipeline" % self.src_cluster.get_nodes())
        self.verify_results()
Example #58
0
 def kill_memcached(server):
     """Kill memcached process running on server.
     """
     shell = RemoteMachineShellConnection(server)
     shell.kill_memcached()
     shell.disconnect()
 def __start_cb_server(self, node):
     shell = RemoteMachineShellConnection(node)
     shell.start_couchbase()
     shell.disconnect()
    def test_rollback(self):
        bucket = self.src_cluster.get_buckets()[0]
        nodes = self.src_cluster.get_nodes()

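        # Rollback scenario: with persistence stopped on both KV nodes, mutations exist
        # only in memory; killing memcached on the master and failing over the other
        # node should force the source DCP streams to roll back, which goxdcr must handle.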
        # Stop Persistence on Node A & Node B
        for node in nodes:
            mem_client = MemcachedClientHelper.direct_client(node, bucket)
            mem_client.stop_persistence()

        goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0])\
                     + '/goxdcr.log*'
        self.setup_xdcr()

        self.src_cluster.pause_all_replications()

        gen = BlobGenerator("C1-",
                            "C1-",
                            self._value_size,
                            end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.src_cluster.resume_all_replications()

        # Perform mutations on the bucket
        self.async_perform_update_delete()

        rest1 = RestConnection(self.src_cluster.get_master_node())
        rest2 = RestConnection(self.dest_cluster.get_master_node())

        # Fetch count of docs in src and dest cluster
        _count1 = rest1.fetch_bucket_stats(
            bucket=bucket.name)["op"]["samples"]["curr_items"][-1]
        _count2 = rest2.fetch_bucket_stats(
            bucket=bucket.name)["op"]["samples"]["curr_items"][-1]

        self.log.info(
            "Before rollback src cluster count = {0} dest cluster count = {1}".
            format(_count1, _count2))

        # Kill memcached on Node A so that Node B becomes master
        shell = RemoteMachineShellConnection(
            self.src_cluster.get_master_node())
        shell.kill_memcached()

        # Start persistence on Node B
        mem_client = MemcachedClientHelper.direct_client(nodes[1], bucket)
        mem_client.start_persistence()

        # Failover Node B
        failover_task = self.src_cluster.async_failover()
        failover_task.result()

        # Wait for Failover & rollback to complete
        self.sleep(60)

        # Fetch count of docs in src and dest cluster
        _count1 = rest1.fetch_bucket_stats(
            bucket=bucket.name)["op"]["samples"]["curr_items"][-1]
        _count2 = rest2.fetch_bucket_stats(
            bucket=bucket.name)["op"]["samples"]["curr_items"][-1]

        self.log.info(
            "After rollback src cluster count = {0} dest cluster count = {1}".
            format(_count1, _count2))

        self.assertTrue(
            self.src_cluster.wait_for_outbound_mutations(),
            "Mutations in source cluster not replicated to target after rollback"
        )
        self.log.info(
            "Mutations in source cluster replicated to target after rollback")

        count = NodeHelper.check_goxdcr_log(
            nodes[0], "Received rollback from DCP stream", goxdcr_log)
        self.assertGreater(count, 0, "rollback did not happen as expected")
        self.log.info("rollback happened as expected")