Code Example #1
    def run(self):
        remote_client = RemoteMachineShellConnection(self.server)
        now = datetime.now()
        day = now.day
        month = now.month
        year = now.year
        hour = now.timetuple().tm_hour
        minute = now.timetuple().tm_min
        file_name = "%s-%s%s%s-%s%s-couch.tar.gz" % (self.server.ip, month, day, year, hour, minute)
        print "Collecting data files from %s\n" % self.server.ip

        remote_client.extract_remote_info()
        data_path = self.__get_data_path(os_type=remote_client.info.type.lower())
        output, error = remote_client.execute_command(
            "tar -zcvf {0} '{1}' >/dev/null 2>&1".format(file_name, data_path)
        )
        print "\n".join(output)
        print "\n".join(error)

        user_path = "/home/"
        if self.server.ssh_username == "root":
            user_path = "/"
        remote_path = "%s%s" % (user_path, self.server.ssh_username)
        status = remote_client.file_exists(remote_path, file_name)
        if not status:
            raise Exception("%s doesn't exists on server" % file_name)
        status = remote_client.get_file(remote_path, file_name, "%s/%s" % (self.path, file_name))
        if not status:
            raise Exception("Fail to download zipped logs from %s" % self.server.ip)
        remote_client.execute_command("rm -f %s" % os.path.join(remote_path, file_name))
        remote_client.disconnect()
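
The file name above is assembled field by field from datetime attributes. As a minimal sketch using only the standard library, the same timestamped name can be produced with a single strftime call (note that strftime zero-pads month, day, hour, and minute, which the hand-built version above does not):

from datetime import datetime

def make_archive_name(ip, suffix="couch.tar.gz"):
    # e.g. "10.1.2.3-04182024-1530-couch.tar.gz" (month/day/year, hour/minute)
    stamp = datetime.now().strftime("%m%d%Y-%H%M")
    return "%s-%s-%s" % (ip, stamp, suffix)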
Code Example #2
 def convert_to_hostname(self, servers_with_hostnames, username='******', password='******'):
     try:
         hostname = []
         for server in servers_with_hostnames:
             shell = RemoteMachineShellConnection(server)
             info = shell.extract_remote_info()
             domain = ''.join(info.domain[0])
             if not domain:
                 output = shell.execute_command_raw('nslookup %s' % info.hostname[0])
                 print output
                 self.fail("Domain is not defined, couchbase cannot be configured correctly. NOT A BUG. CONFIGURATION ISSUE")
             hostname.append(info.hostname[0] + "." + domain)
             master_rest = RestConnection(server)
             current_hostname = master_rest.get_nodes_self().hostname
             self.log.info("get_node_self function returned : {0}".format(current_hostname))
             if server.ip in current_hostname:
                 self.log.info("Node {0} is referred via IP. Need to be referred with hostname. Changing the name of the node!!".format(server.ip))
                 version = RestConnection(server).get_nodes_self().version
                 if version.startswith("1.8.1") or version.startswith("2.0.0") or version.startswith("2.0.1"):
                     RemoteUtilHelper.use_hostname_for_server_settings(server)
                     master_rest.init_cluster()
                 else:
                     master_rest.init_cluster()
                     master_rest.rename_node(username=username, password=password, port='', hostname=hostname[-1])
             else:
                 self.log.info("Node {0} already referred via hostname. No need to convert the name".format(server.ip))
     finally:
         shell.disconnect()
     return hostname
Code Example #3
 def convert_to_hostname(self, servers_with_hostnames):
     try:
         hostname = []
         for server in servers_with_hostnames:
             shell = RemoteMachineShellConnection(server)
             info = shell.extract_remote_info()
             domain = ''.join(info.domain[0])
             hostname.append(info.hostname[0] + "." + domain)
             master_rest = RestConnection(server)
             var = master_rest.get_nodes_self().hostname
      flag = server.ip in var
             self.log.info("get_node_self function returned : {0}".format(var))
             if flag:
                 self.log.info("Node {0} is referred via IP. Need to be referred with hostname. Changing the name of the node!!".format(server.ip))
                 version = RestConnection(server).get_nodes_self().version
                 if version.startswith("1.8.1") or version.startswith("2.0.0") or version.startswith("2.0.1"):
                     RemoteUtilHelper.use_hostname_for_server_settings(server)
                     obj = RestConnection(server)
                     obj.init_cluster()
                 else:
                     obj = RestConnection(server)
                     obj.init_cluster()
                     var = master_rest.rename_node(username='******', password='******', port='', hostname=hostname[-1])
             else:
                 self.log.info("Node {0} already referred via hostname. No need to convert the name".format(server.ip))
     finally:
         shell.disconnect()
     return hostname
Code Example #4
    def _save_snapshot(self, server, bucket, file_base=None):
        """Save data files to a snapshot"""

        src_data_path = os.path.dirname(server.data_path or
                                        testconstants.COUCHBASE_DATA_PATH)
        dest_data_path = "{0}-snapshots".format(src_data_path)

        self.log.info("server={0}, src_data_path={1}, dest_data_path={2}"
                      .format(server.ip, src_data_path, dest_data_path))

        shell = RemoteMachineShellConnection(server)

        build_name, short_version, full_version = \
            shell.find_build_version("/opt/couchbase/", "VERSION.txt", "cb")

        dest_file = self._build_tar_name(bucket, full_version, file_base)

        self._exec_and_log(shell, "mkdir -p {0}".format(dest_data_path))

        # save as gzip file, if file exists, overwrite
        # TODO: multiple buckets
        zip_cmd = "cd {0}; tar -cvzf {1}/{2} {3} {3}-data _*"\
            .format(src_data_path, dest_data_path, dest_file, bucket)
        self._exec_and_log(shell, zip_cmd)

        shell.disconnect()
        return True
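
Most of these examples open a RemoteMachineShellConnection and call disconnect() by hand, which can be skipped if a command in between raises. A minimal sketch of a context-manager wrapper, assuming only the import path already shown in Code Example #6, keeps the cleanup in one place; this helper is not part of the testrunner API:

from contextlib import contextmanager
from lib.remote.remote_util import RemoteMachineShellConnection

@contextmanager
def remote_shell(server):
    # Yields a connected shell and guarantees disconnect() on exit,
    # even if the body raises.
    shell = RemoteMachineShellConnection(server)
    try:
        yield shell
    finally:
        shell.disconnect()

# usage:
# with remote_shell(server) as shell:
#     output, error = shell.execute_command("ls /opt/couchbase")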
Code Example #5
    def run(self):
        remote_client = RemoteMachineShellConnection(self.server)
        now = datetime.now()
        day = now.day
        month = now.month
        year = now.year
        hour = now.timetuple().tm_hour
        minute = now.timetuple().tm_min
        file_name = "%s-%s%s%s-%s%s-diag.zip" % (self.server.ip,
                                                 month, day, year, hour, minute)
        print "Collecting logs from %s\n" % self.server.ip
        output, error = remote_client.execute_cbcollect_info(file_name)
        print "\n".join(output)
        print "\n".join(error)

        user_path = "/home/"
        if self.server.ssh_username == "root":
            user_path = "/"
        remote_path = "%s%s" % (user_path, self.server.ssh_username)
        status = remote_client.file_exists(remote_path, file_name)
        if not status:
            raise Exception("%s doesn't exists on server" % file_name)
        status = remote_client.get_file(remote_path, file_name,
                                        "%s/%s" % (self.path, file_name))
        if status:
            print "Downloading zipped logs from %s" % self.server.ip
        else:
            raise Exception("Fail to download zipped logs from %s"
                            % self.server.ip)
        remote_client.disconnect()
Code Example #6
    def run(self):
        file_name = "%s-%s-diag.zip" % (self.server.ip, time_stamp())
        if not self.local:
            from lib.remote.remote_util import RemoteMachineShellConnection

            remote_client = RemoteMachineShellConnection(self.server)
            print "Collecting logs from %s\n" % self.server.ip
            output, error = remote_client.execute_cbcollect_info(file_name)
            print "\n".join(output)
            print "\n".join(error)

            user_path = "/home/"
            if remote_client.info.distribution_type.lower() == "mac":
                user_path = "/Users/"
            else:
                if self.server.ssh_username == "root":
                    user_path = "/"

            remote_path = "%s%s" % (user_path, self.server.ssh_username)
            status = remote_client.file_exists(remote_path, file_name)
            if not status:
                raise Exception("%s doesn't exists on server" % file_name)
            status = remote_client.get_file(remote_path, file_name, "%s/%s" % (self.path, file_name))
            if status:
                print "Downloading zipped logs from %s" % self.server.ip
            else:
                raise Exception("Fail to download zipped logs from %s" % self.server.ip)
            remote_client.execute_command("rm -f %s" % os.path.join(remote_path, file_name))
            remote_client.disconnect()
Code Example #7
File: csvdatatest.py Project: arod1987/testrunner
 def create_and_restore_csv(self):
     try:
         self.__load_data()
         shell_obj = RemoteMachineShellConnection(self.master)
         self.log.info("Removing backup folder if already present")
         info = shell_obj.extract_remote_info()
         path = "/tmp/backup/"
         if info.type.lower() == "windows":
             path = "/cygdrive/c" + path
         #TODO : Check for mac also
         shell_obj.delete_files(path)
         create_dir = "mkdir " + path
         data_type = "csv:"
         destination = path + "data.csv"
         shell_obj.execute_command(create_dir)
         source = "http://*****:*****@ %s" % destination)
         source, destination = destination, source
         options = "-B standard_bucket0" + self.username_arg + self.password_arg
         self.log.info("Restoring data....!")
         shell_obj.execute_cbtransfer(source, destination, options)
         self.sleep(10)
         self.log.info("Checking whether number of items loaded match with the number of items restored.")
         rest = RestConnection(self.master)
         itemCount = rest.get_bucket_json('standard_bucket0')['basicStats']['itemCount']
         self.assertEqual(itemCount, self.num_items, msg="Number of items loaded do no match\
         with the number of items restored. Number of items loaded is {0} \
         but number of items restored is {1}".format(self.num_items, itemCount))
         self.log.info("Number of items loaded = Number of items restored. Pass!!")
     finally:
         shell_obj.disconnect()
Code Example #8
 def stop_measure_sched_delay(self):
     for server in self.servers:
         shell = RemoteMachineShellConnection(server)
         cmd = "killall -9 -r .*measure-sched-delays"
         output, error = shell.execute_command(cmd)
         shell.log_command_output(output, error)
         shell.disconnect()
         self.log.info("measure-sched-delays was stopped on {0}".format(server.ip))
Code Example #9
 def set_ep_compaction(self, comp_ratio):
     """Set up ep_engine side compaction ratio"""
     for server in self.input.servers:
         shell = RemoteMachineShellConnection(server)
         cmd = "/opt/couchbase/bin/cbepctl localhost:11210 "\
               "set flush_param db_frag_threshold {0}".format(comp_ratio)
         self._exec_and_log(shell, cmd)
         shell.disconnect()
Code Example #10
 def fetch_logs(self):
     for server in self.servers:
         shell = RemoteMachineShellConnection(server)
         files = shell.list_files(self.path + "/")
         files = [file for file in files if file["file"].startswith("sched-delay")]
         for file in files:
             shell.copy_file_remote_to_local(file["path"] + file["file"], os.getcwd() + "/" + file["file"])
         self.log.info("copied {0} from {1}".format([file["file"] for file in files] , server.ip))
         shell.disconnect()
Code Example #11
File: eventing_base.py Project: membase/testrunner
 def reboot_server(self, server):
     remote_client = RemoteMachineShellConnection(server)
     remote_client.reboot_node()
     remote_client.disconnect()
     # wait for restart and warmup on all node
     self.sleep(self.wait_timeout * 5)
     # disable firewall on these nodes
     self.stop_firewall_on_node(server)
     # wait till node is ready after warmup
     ClusterOperationHelper.wait_for_ns_servers_or_assert([server], self, wait_if_warmup=True)
Code Example #12
    def set_ep_param(self, type, param, value):
        """
        Set ep-engine specific param, using cbepctl

        type: parameter type, e.g. flush_param, tap_param, etc.
        """
        bucket = Bucket(name=self.buckets[0], authType="sasl", saslPassword="")
        for server in self.input.servers:
            shell = RemoteMachineShellConnection(server)
            shell.execute_cbepctl(bucket,
                                  "", "set %s" % type, param, value)
            shell.disconnect()
Code Example #13
    def set_up_proxy(self, bucket=None):
        """Set up and start Moxi"""

        if self.input.moxis:
            self.log.info("setting up proxy")

            bucket = bucket or self.param('bucket', 'default')

            shell = RemoteMachineShellConnection(self.input.moxis[0])
            shell.start_moxi(self.input.servers[0].ip, bucket,
                             self.input.moxis[0].port)
            shell.disconnect()
Code Example #14
File: tuq_tokens.py Project: bharath-gp/testrunner
    def load_sample_buckets(self, bucketName="beer-sample" ):
        """
        Load the specified sample bucket in Couchbase
        """
        #self.cluster.bucket_delete(server=self.master, bucket="default")
        server = self.master
        shell = RemoteMachineShellConnection(server)
        shell.execute_command("""curl -v -u Administrator:password \
                             -X POST http://{0}:8091/sampleBuckets/install \
                          -d '["{1}"]'""".format(server.ip, bucketName))
        self.sleep(30)

        shell.disconnect()
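
The example above shells out to curl to hit the /sampleBuckets/install endpoint. A hedged alternative sketch makes the same POST from Python with the requests library; the Administrator/password credentials and port 8091 simply mirror the curl command above and are assumptions about the test cluster:

import json
import requests

def install_sample_bucket(host, bucket_name="beer-sample",
                          user="Administrator", password="password"):
    # Same request body as the curl call above: a JSON array of bucket names.
    url = "http://{0}:8091/sampleBuckets/install".format(host)
    resp = requests.post(url, auth=(user, password),
                         data=json.dumps([bucket_name]))
    return resp.status_code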
Code Example #15
File: eventing_base.py Project: membase/testrunner
 def kill_erlang_service(self, server):
     remote_client = RemoteMachineShellConnection(server)
     os_info = remote_client.extract_remote_info()
     log.info("os_info : {0}".format(os_info))
     if os_info.type.lower() == "windows":
         remote_client.kill_erlang(os="windows")
     else:
         remote_client.kill_erlang()
     remote_client.start_couchbase()
     remote_client.disconnect()
     # wait for restart and warmup on all node
     self.sleep(self.wait_timeout * 2)
     # wait till node is ready after warmup
     ClusterOperationHelper.wait_for_ns_servers_or_assert([server], self, wait_if_warmup=True)
Code Example #16
 def kill_erlang_service(self, server):
     remote_client = RemoteMachineShellConnection(server)
     os_info = remote_client.extract_remote_info()
     log.info("os_info : {0}".format(os_info))
     if os_info.type.lower() == "windows":
         remote_client.kill_erlang(os="windows")
     else:
         remote_client.kill_erlang()
     remote_client.start_couchbase()
     remote_client.disconnect()
     # wait for restart and warmup on all node
     self.sleep(self.wait_timeout * 2)
     # wait till node is ready after warmup
     ClusterOperationHelper.wait_for_ns_servers_or_assert(
         [server], self, wait_if_warmup=True)
Code Example #17
 def fetch_logs(self):
     for server in self.servers:
         shell = RemoteMachineShellConnection(server)
         files = shell.list_files(self.path + "/")
         files = [
             file for file in files
             if file["file"].startswith("sched-delay")
         ]
         for file in files:
             shell.copy_file_remote_to_local(
                 file["path"] + file["file"],
                 os.getcwd() + "/" + file["file"])
         self.log.info("copied {0} from {1}".format(
             [file["file"] for file in files], server.ip))
         shell.disconnect()
Code Example #18
 def start_measure_sched_delays(self):
     for server in self.servers:
         shell = RemoteMachineShellConnection(server)
         exists = shell.file_exists(self.path, 'measure-sched-delays')
         if not exists:
             shell.copy_file_local_to_remote("resources/linux/measure-sched-delays.tar.gz", "{0}.tar.gz".format(self.path))
             output, error = shell.execute_command_raw("cd /tmp/; tar -xvzf measure-sched-delays.tar.gz")
             shell.log_command_output(output, error)
             output, error = shell.execute_command_raw("cd {0}; ./configure; make".format(self.path))
             shell.log_command_output(output, error)
         else:
             self.log.info("measure-sched-delays already deployed on {0}:{1}".format(server.ip, self.path))
         self.stop_measure_sched_delay()
         output, error = shell.execute_command_raw("rm -rf {0}/sched-delay*".format(self.path))
         shell.log_command_output(output, error)
         self.launch_measure_sched_delay(shell, file="sched-delay-{0}".format(server.ip))
         shell.disconnect()
Code Example #19
 def rename_nodes(self, servers, names={}):
     hostnames={}
     for server in servers:
         shell = RemoteMachineShellConnection(server)
         try:
             if not names:
                 hostname = shell.get_full_hostname()
             else:
                 hostname = names[server]
             rest = RestConnection(server)
             renamed, content = rest.rename_node(hostname, username=server.rest_username, password=server.rest_password)
             self.assertTrue(renamed, "Server %s is not renamed!Hostname %s. Error %s" %(
                                     server, hostname, content))
             hostnames[server] = hostname
         finally:
             shell.disconnect()
     return hostnames
Code Example #20
File: hostnameTests.py Project: umang-cb/Jython
 def convert_to_hostname(self,
                         servers_with_hostnames,
                         username='******',
                         password='******'):
     try:
         hostname = []
         for server in servers_with_hostnames:
             shell = RemoteMachineShellConnection(server)
             info = shell.extract_remote_info()
             domain = ''.join(info.domain[0])
             if not domain:
                 output = shell.execute_command_raw('nslookup %s' %
                                                    info.hostname[0])
                 print output
                 self.fail(
                     "Domain is not defined, couchbase cannot be configured correctly. NOT A BUG. CONFIGURATION ISSUE"
                 )
             hostname.append(info.hostname[0] + "." + domain)
             master_rest = RestConnection(server)
             current_hostname = master_rest.get_nodes_self().hostname
             self.log.info("get_node_self function returned : {0}".format(
                 current_hostname))
             if server.ip in current_hostname:
                 self.log.info(
                     "Node {0} is referred via IP. Need to be referred with hostname. Changing the name of the node!!"
                     .format(server.ip))
                 version = RestConnection(server).get_nodes_self().version
                 if version.startswith("1.8.1") or version.startswith(
                         "2.0.0") or version.startswith("2.0.1"):
                     RemoteUtilHelper.use_hostname_for_server_settings(
                         server)
                     master_rest.init_cluster()
                 else:
                     master_rest.init_cluster()
                     master_rest.rename_node(username=username,
                                             password=password,
                                             port='',
                                             hostname=hostname[-1])
             else:
                 self.log.info(
                     "Node {0} already referred via hostname. No need to convert the name"
                     .format(server.ip))
     finally:
         shell.disconnect()
     return hostname
Code Example #21
File: hostnamemgmt_base.py Project: umang-cb/Jython
 def rename_nodes(self, servers, names={}):
     print '\n\nrename names servers:', servers, ' names', names
     hostnames={}
     for server in servers:
         shell = RemoteMachineShellConnection(server)
         try:
             if not names:
                 hostname = shell.get_full_hostname()
             else:
                 hostname = names[server]
             rest = RestConnection(server)
             renamed, content = rest.rename_node(hostname, username=server.rest_username, password=server.rest_password)
             self.assertTrue(renamed, "Server %s is not renamed!Hostname %s. Error %s" %(
                                     server, hostname, content))
             hostnames[server] = hostname
         finally:
             shell.disconnect()
     return hostnames
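
A hedged usage sketch for rename_nodes(), assumed to run inside a testrunner test method where self.servers holds the cluster's server objects: pass an explicit server-to-hostname mapping, or an empty dict to fall back to get_full_hostname() as the code above does. The example.com names are placeholders only:

names = {server: "node{0}.example.com".format(i)
         for i, server in enumerate(self.servers)}
hostnames = self.rename_nodes(self.servers, names=names)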
Code Example #22
 def check_eventing_logs_for_panic(self):
     self.generate_map_nodes_out_dist()
     panic_str = "panic"
     eventing_nodes = self.get_nodes_from_services_map(
         service_type="eventing", get_all_nodes=True)
     if not eventing_nodes:
         return None
     for eventing_node in eventing_nodes:
         shell = RemoteMachineShellConnection(eventing_node)
         _, dir_name = RestConnection(eventing_node).diag_eval(
             'filename:absname(element(2, application:get_env(ns_server,error_logger_mf_dir))).'
         )
         eventing_log = str(dir_name) + '/eventing.log*'
         count, err = shell.execute_command(
             "zgrep \"{0}\" {1} | wc -l".format(panic_str, eventing_log))
         if isinstance(count, list):
             count = int(count[0])
         else:
             count = int(count)
         if count > self.panic_count:
             log.info(
                 "===== PANIC OBSERVED IN EVENTING LOGS ON SERVER {0}=====".
                 format(eventing_node.ip))
             panic_trace, _ = shell.execute_command(
                 "zgrep \"{0}\" {1}".format(panic_str, eventing_log))
             log.info("\n {0}".format(panic_trace))
             self.panic_count = count
         os_info = shell.extract_remote_info()
         if os_info.type.lower() == "windows":
             # This is a fixed path in all windows systems inside couchbase
             dir_name_crash = 'c://CrashDumps'
         else:
             dir_name_crash = str(dir_name) + '/../crash/'
         core_dump_count, err = shell.execute_command(
             "ls {0}| wc -l".format(dir_name_crash))
         if isinstance(core_dump_count, list):
             core_dump_count = int(core_dump_count[0])
         else:
             core_dump_count = int(core_dump_count)
         if core_dump_count > 0:
             log.info(
                 "===== CORE DUMPS SEEN ON EVENTING NODES, SERVER {0} : {1} crashes seen ====="
                 .format(eventing_node.ip, core_dump_count))
         shell.disconnect()
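
The list-or-scalar handling of execute_command() output appears twice in the method above. A small sketch of a helper that normalizes that output into an integer, assuming the output is either a list of lines or a bare string as in the example:

def _as_int(output, default=0):
    # execute_command() may return a list of output lines or a plain string;
    # use the first line in the list case and fall back to default on junk.
    try:
        if isinstance(output, list):
            return int(output[0])
        return int(output)
    except (IndexError, ValueError, TypeError):
        return default

# count = _as_int(count)
# core_dump_count = _as_int(core_dump_count)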
Code Example #23
File: testssl.py Project: couchbase/testrunner
 def test_tls_1_dot_2_blocking(self):
     """
     1. Set tls version = 1.3
     2. Restart couchbase server
     3. Verify tls version = 1.3 and not set to 1.2(default)
     """
     rest = RestConnection(self.master)
     rest.set_min_tls_version(version="tlsv1.3")
     self.test_tls_min_version()
     try:
         for node in self.servers:
             shell = RemoteMachineShellConnection(node)
             shell.stop_couchbase()
             time.sleep(10)
             shell.start_couchbase()
             shell.disconnect()
     except Exception as e:
         self.fail(e)
     self.test_tls_min_version()
Code Example #24
def perform_cb_collect(_input, log_path=None):
    import logger
    log = logger.Logger.get_logger()
    for node in _input.servers:
        params = dict()
        if len(_input.servers) != 1:
            params['nodes'] = 'ns_1@' + node.ip
        else:
            # In case of single node we have to pass ip as below
            params['nodes'] = 'ns_1@' + '127.0.0.1'

        log.info('Collecting log on node ' + node.ip)
        rest = RestConnection(node)
        status, _, _ = rest.perform_cb_collect(params)
        time.sleep(10)  # This is needed as it takes a few seconds before the collection starts
        log.info('CB collect status on %s is %s' % (node.ip, status))

        log.info('Polling active task endpoint to check CB collect status')
        if status is True:
            cb_collect_response = {}
            while True:
                content = rest.active_tasks()
                for response in content:
                    if response['type'] == 'clusterLogsCollection':
                        cb_collect_response = response
                        break
                if cb_collect_response.get('status') == 'completed':
                    log.info(cb_collect_response)
                    break
                else:
                    time.sleep(10)  # CB collect in progress, wait for 10 seconds and check progress again

            log.info('Copy CB collect ZIP file to Client')
            remote_client = RemoteMachineShellConnection(node)
            cb_collect_path = cb_collect_response['perNode'][params['nodes']]['path']
            zip_file_copied = remote_client.get_file(os.path.dirname(cb_collect_path), os.path.basename(cb_collect_path),
                                                     log_path)
            log.info('%s node cb collect zip copied on client : %s' % (node.ip, zip_file_copied))

            if zip_file_copied:
                remote_client.execute_command("rm -f %s" % cb_collect_path)
                remote_client.disconnect()
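
The polling loop above waits on rest.active_tasks() indefinitely until the clusterLogsCollection task completes. A hedged sketch of a bounded variant, with hypothetical timeout and poll_interval parameters, using the same REST call:

import time

def wait_for_cb_collect(rest, timeout=600, poll_interval=10):
    # Poll active tasks until the log collection completes or the timeout
    # expires; returns the completed task dict, or None on timeout.
    deadline = time.time() + timeout
    while time.time() < deadline:
        for response in rest.active_tasks():
            if response.get('type') == 'clusterLogsCollection' \
                    and response.get('status') == 'completed':
                return response
        time.sleep(poll_interval)
    return None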
Code Example #25
 def _run(self, server):
     mem = None
     try:
         if not self.local:
             from lib.remote.remote_util import RemoteMachineShellConnection
             remote_client = RemoteMachineShellConnection(server)
             print("Collecting memory info from %s\n" % server.ip)
             remote_cmd = "sh -c 'if [[ \"$OSTYPE\" == \"darwin\"* ]]; then sysctl hw.memsize|grep -Eo [0-9]; " \
                                 "else grep MemTotal /proc/meminfo|grep -Eo [0-9]; fi'"
             output, error = remote_client.execute_command(remote_cmd)
             print("\n".join(error))
             remote_client.disconnect()
             mem = int("".join(output))
     except Exception as e:
         self.fail.append((server.ip, e))
     else:
         if mem:
             self.succ[server.ip] = mem
         else:
             self.fail.append((server.ip, Exception("mem parse failed")))
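
The grep -Eo [0-9] trick above emits one digit per output line, which is why the digits are joined back together. A hedged, Linux-only sketch of the same probe that lets awk print the MemTotal value in a single field (the macOS branch of the original command is omitted here); it relies only on the execute_command() call already used above:

def total_memory_kb(remote_client):
    # MemTotal's second field in /proc/meminfo is the size in kB.
    output, _ = remote_client.execute_command(
        "grep MemTotal /proc/meminfo | awk '{print $2}'")
    return int(output[0].strip()) if output else None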
Code Example #26
File: csvdatatest.py Project: rayleyva/testrunner
 def create_and_restore_csv(self):
     try:
         self.__load_data()
         shell_obj = RemoteMachineShellConnection(self.master)
         self.log.info("Removing backup folder if already present")
         info = shell_obj.extract_remote_info()
         path = "/tmp/backup/"
         if info.type.lower() == "windows":
             path = "/cygdrive/c" + path
         #TODO : Check for mac also
         shell_obj.delete_files(path)
         create_dir = "mkdir " + path
         data_type = "csv:"
         destination = path + "data.csv"
         shell_obj.execute_command(create_dir)
         source = "http://*****:*****@ %s" % destination)
         source, destination = destination, source
         options = "-B standard_bucket0" + self.username_arg + self.password_arg
         self.log.info("Restoring data....!")
         shell_obj.execute_cbtransfer(source, destination, options)
         self.sleep(10)
         self.log.info(
             "Checking whether number of items loaded match with the number of items restored."
         )
         rest = RestConnection(self.master)
         itemCount = rest.get_bucket_json(
             'standard_bucket0')['basicStats']['itemCount']
         self.assertEqual(itemCount,
                          self.num_items,
                          msg="Number of items loaded do no match\
         with the number of items restored. Number of items loaded is {0} \
         but number of items restored is {1}".format(
                              self.num_items, itemCount))
         self.log.info(
             "Number of items loaded = Number of items restored. Pass!!")
     finally:
         shell_obj.disconnect()
Code Example #27
    def setUp(self):
        self.test_setup_finished = False
        log.info(
            "==============  Multiple CA Upgrade setup has started =============="
        )
        super(MultipleCAUpgrade, self).setUp()

        self.initial_version = self.input.param("initial_version",
                                                '6.6.3-9799')
        self.upgrade_version = self.input.param("upgrade_version",
                                                "7.1.0-1745")
        self.enc_key_mixed_mode = self.input.param("enc_key_mixed_mode", False)
        self.skip_rbac_internal_users_setup = self.input.param(
            "skip_rbac_internal_users_setup", False)
        self.skip_ldap_external_user_setup = self.input.param(
            "skip_ldap_external_user_setup", False)

        self.base_version = RestConnection(self.master).get_nodes_versions()[0]
        if self.enc_key_mixed_mode in ["True", True]:
            self.enc_key_mixed_mode = True
        else:
            self.enc_key_mixed_mode = False
        shell = RemoteMachineShellConnection(self.master)
        self.windows_test = False
        if shell.extract_remote_info().distribution_type == "windows":
            self.windows_test = True
        shell.disconnect()
        self.openssl_path = "/opt/couchbase/bin/openssl"
        self.inbox_folder_path = "/opt/couchbase/var/lib/couchbase/inbox/"
        if self.windows_test:
            self.openssl_path = "C:/Program Files/Couchbase/Server/bin/openssl"
            self.inbox_folder_path = "C:/Program Files/Couchbase/Server/var/lib/couchbase/inbox/"
        self.plain_passw_map = dict()

        self.load_sample_bucket(self.master, "travel-sample")
        if not self.skip_rbac_internal_users_setup:
            self.add_rbac_groups_roles(self.master)
        if not self.skip_ldap_external_user_setup:
            self.setup_ldap_config(server=self.master)
            self.add_ldap_user(self.master)
        self.test_setup_finished = True
Code Example #28
    def set_up_dgm(self):
        """Download fragmented, DGM dataset onto each cluster node, if not
        already locally available.

        The number of vbuckets and database schema must match the
        target cluster.

        Shutdown all cluster nodes.

        Do a cluster-restore.

        Restart all cluster nodes."""

        bucket = self.param("bucket", "default")
        ClusterOperationHelper.stop_cluster(self.input.servers)
        for server in self.input.servers:
            remote = RemoteMachineShellConnection(server)
            #TODO: Better way to pass num_nodes and db_size?
            self.get_data_files(remote, bucket, 1, 10)
            remote.disconnect()
        ClusterOperationHelper.start_cluster(self.input.servers)
Code Example #29
    def set_up_dgm(self):
        """Download fragmented, DGM dataset onto each cluster node, if not
        already locally available.

        The number of vbuckets and database schema must match the
        target cluster.

        Shutdown all cluster nodes.

        Do a cluster-restore.

        Restart all cluster nodes."""

        bucket = self.param("bucket", "default")
        ClusterOperationHelper.stop_cluster(self.input.servers)
        for server in self.input.servers:
            remote = RemoteMachineShellConnection(server)
            #TODO: Better way to pass num_nodes and db_size?
            self.get_data_files(remote, bucket, 1, 10)
            remote.disconnect()
        ClusterOperationHelper.start_cluster(self.input.servers)
Code Example #30
    def index_query_beer_sample(self):
        #delete default bucket
        self._cb_cluster.delete_bucket("default")
        master = self._cb_cluster.get_master_node()
        from lib.remote.remote_util import RemoteMachineShellConnection
        shell = RemoteMachineShellConnection(master)
        shell.execute_command("""curl -v -u Administrator:password \
                         -X POST http://{0}:8091/sampleBuckets/install \
                      -d '["beer-sample"]'""".format(master.ip))
        shell.disconnect()
        self.sleep(20)
        bucket = self._cb_cluster.get_bucket_by_name("beer-sample")
        index = self.create_index(bucket, "beer-index")
        self.wait_for_indexing_complete()
        self.validate_index_count(equal_bucket_doc_count=True,
                                  zero_rows_ok=False)

        query = {"match": "cafe", "field": "name"}
        hits, _, _, _ = index.execute_query(query,
                                         zero_results_ok=False,
                                         expected_hits=10)
        self.log.info("Hits: %s" % hits)
Code Example #31
 def load(self, generators_load):
     gens_load = []
     for generator_load in generators_load:
         gens_load.append(copy.deepcopy(generator_load))
     items = 0
     for gen_load in gens_load:
         items += (gen_load.end - gen_load.start)
     shell = RemoteMachineShellConnection(self.server)
     try:
         self.log.info("Delete directory's content %s/data/default/%s ..." % (self.directory, self.bucket_name))
         shell.execute_command('rm -rf %s/data/default/*' % self.directory)
         self.log.info("Create directory %s/data/default/%s..." % (self.directory, self.bucket_name))
         shell.execute_command('mkdir -p %s/data/default/%s' % (self.directory, self.bucket_name))
         self.log.info("Load %s documents to %s/data/default/%s..." % (items, self.directory, self.bucket_name))
         for gen_load in gens_load:
             for i in xrange(gen_load.end):
                 key, value = gen_load.next()
                 out = shell.execute_command("echo '%s' > %s/data/default/%s/%s.json" % (value, self.directory,
                                                                                         self.bucket_name, key))
         self.log.info("LOAD IS FINISHED")
     finally:
         shell.disconnect()
Code Example #32
File: doc_loader.py Project: EricACooper/testrunner
 def load(self, generators_load):
     gens_load = []
     for generator_load in generators_load:
         gens_load.append(copy.deepcopy(generator_load))
     items = 0
     for gen_load in gens_load:
         items += (gen_load.end - gen_load.start)
     shell = RemoteMachineShellConnection(self.server)
     try:
         self.log.info("Delete directory's content %s/data/default/%s ..." % (self.directory, self.bucket_name))
         shell.execute_command('rm -rf %s/data/default/*' % self.directory)
         self.log.info("Create directory %s/data/default/%s..." % (self.directory, self.bucket_name))
         shell.execute_command('mkdir -p %s/data/default/%s' % (self.directory, self.bucket_name))
         self.log.info("Load %s documents to %s/data/default/%s..." % (items, self.directory, self.bucket_name))
         for gen_load in gens_load:
             for i in xrange(gen_load.end):
                 key, value = gen_load.next()
                 out = shell.execute_command("echo '%s' > %s/data/default/%s/%s.json" % (value, self.directory,
                                                                                         self.bucket_name, key))
         self.log.info("LOAD IS FINISHED")
     finally:
         shell.disconnect()
Code Example #33
    def _load_snapshot(self, server, bucket, file_base=None, overwrite=True):
        """Load data files from a snapshot"""

        dest_data_path = os.path.dirname(server.data_path or
                                         testconstants.COUCHBASE_DATA_PATH)
        src_data_path = "{0}-snapshots".format(dest_data_path)

        self.log.info("server={0}, src_data_path={1}, dest_data_path={2}"
                      .format(server.ip, src_data_path, dest_data_path))

        shell = RemoteMachineShellConnection(server)

        build_name, short_version, full_version = \
            shell.find_build_version("/opt/couchbase/", "VERSION.txt", "cb")

        src_file = self._build_tar_name(bucket, full_version, file_base)

        if not shell.file_exists(src_data_path, src_file):
            self.log.error("file '{0}/{1}' does not exist"
                           .format(src_data_path, src_file))
            shell.disconnect()
            return False

        if not overwrite:
            self._save_snapshot(server, bucket,
                                "{0}.tar.gz".format(
                                    time.strftime(PerfDefaults.strftime)))  # TODO: filename

        rm_cmd = "rm -rf {0}/{1} {0}/{1}-data {0}/_*".format(dest_data_path,
                                                             bucket)
        self._exec_and_log(shell, rm_cmd)

        unzip_cmd = "cd {0}; tar -xvzf {1}/{2}".format(dest_data_path,
                                                       src_data_path, src_file)
        self._exec_and_log(shell, unzip_cmd)

        shell.disconnect()
        return True
Code Example #34
File: eventing_base.py Project: membase/testrunner
 def check_eventing_logs_for_panic(self):
     self.generate_map_nodes_out_dist()
     panic_str = "panic"
     eventing_nodes = self.get_nodes_from_services_map(service_type="eventing", get_all_nodes=True)
     if not eventing_nodes:
         return None
     for eventing_node in eventing_nodes:
         shell = RemoteMachineShellConnection(eventing_node)
         _, dir_name = RestConnection(eventing_node).diag_eval(
             'filename:absname(element(2, application:get_env(ns_server,error_logger_mf_dir))).')
         eventing_log = str(dir_name) + '/eventing.log*'
         count, err = shell.execute_command("zgrep \"{0}\" {1} | wc -l".
                                            format(panic_str, eventing_log))
         if isinstance(count, list):
             count = int(count[0])
         else:
             count = int(count)
         if count > self.panic_count:
             log.info("===== PANIC OBSERVED IN EVENTING LOGS ON SERVER {0}=====".format(eventing_node.ip))
             panic_trace, _ = shell.execute_command("zgrep \"{0}\" {1}".
                                                    format(panic_str, eventing_log))
             log.info("\n {0}".format(panic_trace))
             self.panic_count = count
         os_info = shell.extract_remote_info()
         if os_info.type.lower() == "windows":
             # This is a fixed path in all windows systems inside couchbase
             dir_name_crash = 'c://CrashDumps'
         else:
             dir_name_crash = str(dir_name) + '/../crash/'
         core_dump_count, err = shell.execute_command("ls {0}| wc -l".format(dir_name_crash))
         if isinstance(core_dump_count, list):
             core_dump_count = int(core_dump_count[0])
         else:
             core_dump_count = int(core_dump_count)
         if core_dump_count > 0:
             log.info("===== CORE DUMPS SEEN ON EVENTING NODES, SERVER {0} : {1} crashes seen =====".format(
                      eventing_node.ip, core_dump_count))
         shell.disconnect()
Code Example #35
    def _load_snapshot(self, server, bucket, file_base=None, overwrite=True):
        """Load data files from a snapshot"""

        dest_data_path = os.path.dirname(server.data_path
                                         or testconstants.COUCHBASE_DATA_PATH)
        src_data_path = "{0}-snapshots".format(dest_data_path)

        self.log.info(
            "server={0}, src_data_path={1}, dest_data_path={2}".format(
                server.ip, src_data_path, dest_data_path))

        shell = RemoteMachineShellConnection(server)

        build_name, short_version, full_version = \
            shell.find_build_version("/opt/couchbase/", "VERSION.txt", "cb")

        src_file = self._build_tar_name(bucket, full_version, file_base)

        if not shell.file_exists(src_data_path, src_file):
            self.log.error("file '{0}/{1}' does not exist".format(
                src_data_path, src_file))
            shell.disconnect()
            return False

        if not overwrite:
            self._save_snapshot(server, bucket, "{0}.tar.gz".format(
                time.strftime(PerfDefaults.strftime)))  # TODO: filename

        rm_cmd = "rm -rf {0}/{1} {0}/{1}-data {0}/_*".format(
            dest_data_path, bucket)
        self._exec_and_log(shell, rm_cmd)

        unzip_cmd = "cd {0}; tar -xvzf {1}/{2}".format(dest_data_path,
                                                       src_data_path, src_file)
        self._exec_and_log(shell, unzip_cmd)

        shell.disconnect()
        return True
Code Example #36
File: multiple_CA.py Project: couchbase/testrunner
 def test_restart_node_with_encrypted_pkeys(self):
     """
     1. Init node cluster, with encrypted node pkeys
     2. Restart a node
     3. Failover and delta recover that node
     4. Restart the node again and rebalance-out this time
     5. Repeat steps 2 to 5 until you are left with master node
     """
     self.x509.generate_multiple_x509_certs(
         servers=self.servers[:self.nodes_init])
     self.x509.upload_root_certs(self.master)
     self.x509.upload_node_certs(servers=self.servers[:self.nodes_init])
     rest = RestConnection(self.master)
     nodes_in_cluster = [node for node in self.servers[:self.nodes_init]]
     for node in self.servers[1:self.nodes_init]:
         shell = RemoteMachineShellConnection(node)
         shell.restart_couchbase()
         shell.disconnect()
         self.sleep(10, "Wait after restart")
         self.cluster.async_failover(nodes_in_cluster, [node],
                                     graceful=False)
         self.wait_for_failover_or_assert(1)
         rest.set_recovery_type("ns_1@" + node.ip, recoveryType="delta")
         https_val = CbServer.use_https  # so that add_node uses https
         CbServer.use_https = True
         task = self.cluster.async_rebalance(nodes_in_cluster, [], [])
         CbServer.use_https = https_val
         self.wait_for_rebalance_to_complete(task)
         shell = RemoteMachineShellConnection(node)
         shell.restart_couchbase()
         shell.disconnect()
         https_val = CbServer.use_https  # so that add_node uses https
         CbServer.use_https = True
         task = self.cluster.async_rebalance(nodes_in_cluster, [], [node])
         self.wait_for_rebalance_to_complete(task)
         CbServer.use_https = https_val
         nodes_in_cluster.remove(node)
Code Example #37
    def test_upgrade_with_ldap_root_cert(self):
        """
        1. Setup a cluster with x509 certs with n2n encryption enabled.
        2. Start an ldap container with a root certificate.
        3. Setup an ldap client connection from CB cluster to ldap server.
        4. Create an ldap user.
        5. Upgrade the CB cluster offline.
        6. Add ldap's root cert to cluster's trusted CAs and make ldap
        client connection from CB to ldap server using cluster's trusted CAs
        7. Validate ldap user authentication works
        """
        self.log.info("------------- test started-------------")
        self.generate_x509_certs(self.servers)
        self.upload_x509_certs(self.servers)
        ntonencryptionBase().setup_nton_cluster(servers=self.servers,
                                                clusterEncryptionLevel="all")

        # setup and start ldap container
        self.log.info("Setting up ldap container")
        self.docker = LdapContainer()
        shell = RemoteMachineShellConnection(self.master)
        self.docker.start_docker(shell)
        self.docker.stop_all_containers(shell)
        self.docker.remove_all_containers(shell)
        self.docker.start_ldap_container(shell)
        shell.disconnect()

        # Setup ldap config and add an ldap user
        self.log.info("Setting up ldap config and creating ldap user")
        param = self.get_ldap_params(
            hosts='ldap.example.org',
            port='636',
            encryption='TLS',
            bindDN='cn=admin,dc=example,dc=org',
            bindPass="******",
            serverCertValidation='false',
            userDNMapping='{"template":"cn=%u,dc=example,dc=org"}')
        self.setup_ldap_config(server=self.master, param=param)
        self.add_ldap_user(server=self.master, user_name=self.docker.ldap_user)

        for node in self.servers:
            self.log.info(
                "-------------Performing upgrade on node {0} to version------------- {1}"
                .format(node, self.upgrade_version))
            upgrade_threads = self._async_update(
                upgrade_version=self.upgrade_version, servers=[node])
            for threads in upgrade_threads:
                threads.join()
            self.log.info("Upgrade finished")

        # Add multiple CAs with strict level of n2n encryption
        CbServer.use_https = True
        ntonencryptionBase().setup_nton_cluster(
            servers=self.servers, clusterEncryptionLevel="strict")
        self._reset_original(self.servers)
        self.x509_new = x509main(host=self.master,
                                 standard="pkcs8",
                                 encryption_type="aes256",
                                 passphrase_type="script")
        self.x509_new.generate_multiple_x509_certs(servers=self.servers)
        for server in self.servers:
            _ = self.x509_new.upload_root_certs(server)
        self.x509_new.upload_node_certs(servers=self.servers)
        self.x509_new.delete_unused_out_of_the_box_CAs(server=self.master)

        # Upload ldap's root CA to cluster's trusted CAs
        self.log.info("Copying ldap CA to inbox/CA folder")
        # TODO: Change the hardcoded path (for now it's okay since the VM on which container is running is fixed )
        self.copy_file_from_slave_to_server(
            server=self.master,
            src=self.docker.ldap_ca,
            dst=
            "/opt/couchbase/var/lib/couchbase/inbox/CA/ldap_container_ca.crt")
        self.log.info("Uploading ldap CA to CB")
        self.x509_new.upload_root_certs(self.master)

        self.log.info("Changing ldap config to use CB trusted CAs")
        param = self.get_ldap_params(
            hosts=self.docker.hostname,
            port=str(self.docker.ssl_port),
            encryption='TLS',
            bindDN=self.docker.bindDN,
            bindPass=self.docker.bindPass,
            serverCertValidation='true',
            userDNMapping='{"template":"cn=%u,dc=example,dc=org"}')
        self.setup_ldap_config(server=self.master, param=param)

        # Validate ldap user post upgrade
        ldap_rest = RestConnection(self.master)
        ldap_rest.username = self.docker.ldap_user
        ldap_rest.password = self.docker.bindPass
        status, content, response = self.make_rest_call_with_ldap_user(
            ldap_rest=ldap_rest)
        if not status:
            self.fail(
                "Rest call with ldap user credentials failed with content "
                "{0}, response{1}".format(content, response))
Code Example #38
class NRUMonitor(threading.Thread):

    CMD_NUM_RUNS = "/opt/couchbase/bin/cbstats localhost:11210 "\
                   "all | grep ep_num_access_scanner_runs"

    CMD_NUM_ITEMS = "/opt/couchbase/bin/cbstats localhost:11210 "\
                    "all | grep ep_access_scanner_num_items"

    CMD_RUNTIME = "/opt/couchbase/bin/cbstats localhost:11210 "\
                  "all | grep ep_access_scanner_last_runtime"

    def __init__(self, freq, reb_delay, eperf):
        self.freq = freq
        self.reb_delay = reb_delay
        self.eperf = eperf
        self.shell = None
        super(NRUMonitor, self).__init__()

    def run(self):
        print("[NRUMonitor] started running")

        # TODO: evaluate all servers, smarter polling freq
        server = self.eperf.input.servers[0]
        self.shell = RemoteMachineShellConnection(server)

        nru_num = self.nru_num = self.get_nru_num()
        if self.nru_num < 0:
            return

        while nru_num <= self.nru_num:
            print("[NRUMonitor] nru_num = %d, sleep for %d seconds"\
                  % (nru_num, self.freq))
            time.sleep(self.freq)
            nru_num = self.get_nru_num()
            if nru_num < 0:
                break

        gmt_now = time.strftime(PerfDefaults.strftime, time.gmtime())
        speed, num_items, run_time = self.get_nru_speed()

        print("[NRUMonitor] access scanner finished at: %s, speed: %s, "\
              "num_items: %s, run_time: %s"\
              % (gmt_now, speed, num_items, run_time))

        self.eperf.clear_hot_keys()

        print("[NRUMonitor] scheduled rebalance after %d seconds"\
              % self.reb_delay)

        self.shell.disconnect()
        self.eperf.latched_rebalance(delay=self.reb_delay, sync=True)

        gmt_now = time.strftime(PerfDefaults.strftime, time.gmtime())
        print("[NRUMonitor] rebalance finished: %s" % gmt_now)

        print("[NRUMonitor] stopped running")

    def get_nru_num(self):
        """Retrieve how many times nru access scanner has been run"""
        return self._get_shell_int(NRUMonitor.CMD_NUM_RUNS)

    def get_nru_speed(self):
        """Retrieve runtime and num_items for the last access scanner run
        Calculate access running speed

        @return (speed, num_items, run_time)
        """
        num_items = self._get_shell_int(NRUMonitor.CMD_NUM_ITEMS)

        if num_items <= 0:
            return -1, -1, -1

        run_time = self._get_shell_int(NRUMonitor.CMD_RUNTIME)

        if run_time <= 0:
            return -1, num_items, -1

        speed = num_items // run_time

        return speed, num_items, run_time

    def _get_shell_int(self, cmd):
        """Fire a shell command and return output as integer"""
        if not cmd:
            print("<_get_shell_int> invalid cmd")
            return -1

        output, error = self.shell.execute_command(cmd)

        if error:
            print("<_get_shell_int> unable to execute cmd '%s' from %s: %s"\
                  % (cmd, self.shell.ip, error))
            return -1

        if not output:
            print("<_get_shell_int> unable to execute cmd '%s' from %s: "\
                  "empty output" % (cmd, self.shell.ip))
            return -1

        try:
            num = int(output[0].split(":")[1])
        except (AttributeError, IndexError, ValueError) as e:
            print("<_get_shell_int> unable to execute cmd '%s' from %s:"\
                  "output - %s, error - %s" % (cmd, self.shell.ip, output, e))
            return -1

        if num < 0:
            print("<_get_shell_int> invalid number: %d" % num)
            return -1

        return num
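
A minimal usage sketch for the monitor above, under the assumption that eperf is the perf-test object passed to __init__ in the class; the 60-second polling frequency and 180-second rebalance delay are illustrative values only:

monitor = NRUMonitor(freq=60, reb_delay=180, eperf=eperf)
monitor.start()   # polls cbstats until the access scanner run count increases
monitor.join()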
Code Example #39
    def test_items_append(self):
        self.desired_item_size = self.input.param("desired_item_size", 2048)
        self.append_size = self.input.param("append_size", 1024)
        self.fixed_append_size = self.input.param("fixed_append_size", True)
        self.append_ratio = self.input.param("append_ratio", 0.5)
        self._load_all_buckets(self.master, self.gen_create, "create", 0,
                               batch_size=1000, pause_secs=5, timeout_secs=100)

        for bucket in self.buckets:
            self.value_size = self.input.param("value_size", 512)
            verify_dict = {}
            vkeys, dkeys = bucket.kvs[1].key_set()

            key_count = len(vkeys)
            app_ratio = self.append_ratio * key_count
            selected_keys = []
            i = 0
            for key in vkeys:
                i += 1
                if i >= app_ratio:
                    break
                selected_keys.append(key)

            awareness = VBucketAwareMemcached(RestConnection(self.master), bucket.name)
            if self.kv_verify:
                for key in selected_keys:
                    value = awareness.memcached(key).get(key)[2]
                    verify_dict[key] = value

            self.log.info("Bucket: {0}".format(bucket.name))
            self.log.info("Appending to have items whose initial size was "
                            + "{0} to equal or cross a size of {1}".format(self.value_size, self.desired_item_size))
            self.log.info("Item-appending of {0} items starting ..".format(len(selected_keys) + 1))

            index = 3
            while self.value_size < self.desired_item_size:
                str_len = self.append_size
                if not self.fixed_append_size:
                    str_len = int(math.pow(2, index))

                for key in selected_keys:
                    random_string = self.random_str_generator(str_len)
                    awareness.memcached(key).append(key, random_string)
                    if self.kv_verify:
                        verify_dict[key] = verify_dict[key] + random_string
                self.log.info("for {0} items size was increased to {1} Bytes".format(len(selected_keys) + 1, self.value_size))
                self.value_size += str_len
                index += 1

            self.log.info("The appending of {0} items ended".format(len(selected_keys) + 1))

        for bucket in self.buckets:
            msg = "Bucket:{0}".format(bucket.name)
            self.log.info("VERIFICATION <" + msg + ">: Phase 0 - Check the gap between "
                      + "mem_used by the bucket and total_allocated_bytes")
            stats = StatsCommon()
            mem_used_stats = stats.get_stats(self.servers, bucket, 'memory', 'mem_used')
            total_allocated_bytes_stats = stats.get_stats(self.servers, bucket, 'memory', 'total_allocated_bytes')
            total_fragmentation_bytes_stats = stats.get_stats(self.servers, bucket, 'memory', 'total_fragmentation_bytes')

            for server in self.servers:
                self.log.info("In {0} bucket {1}, total_fragmentation_bytes + the total_allocated_bytes = {2}"
                              .format(server.ip, bucket.name, (int(total_fragmentation_bytes_stats[server]) + int(total_allocated_bytes_stats[server]))))
                self.log.info("In {0} bucket {1}, mem_used = {2}".format(server.ip, bucket.name, mem_used_stats[server]))
                self.log.info("In {0} bucket {1}, the difference between actual memory used by memcached and mem_used is {2} times"
                              .format(server.ip, bucket.name, float(int(total_fragmentation_bytes_stats[server]) + int(total_allocated_bytes_stats[server])) / float(mem_used_stats[server])))


            self.log.info("VERIFICATION <" + msg + ">: Phase1 - Check if any of the "
                    + "selected keys have value less than the desired value size")
            for key in selected_keys:
                value = awareness.memcached(key).get(key)[2]
                if len(value) < self.desired_item_size:
                    self.fail("Failed to append enough to make value size surpass the "
                                + "size {0}, key {1} has size {2}".format(self.desired_item_size, key, len(value)))

            if self.kv_verify:
                self.log.info("VERIFICATION <" + msg + ">: Phase2 - Check if the content "
                        + "after the appends match what's expected")
                for k in verify_dict:
                    if awareness.memcached(k).get(k)[2] != verify_dict[k]:
                        self.fail("Content at key {0}: not what's expected.".format(k))
                self.log.info("VERIFICATION <" + msg + ">: Successful")

        shell = RemoteMachineShellConnection(self.master)
        shell.execute_cbstats("", "raw", keyname="allocator", vbid="")
        shell.disconnect()
Code Example #40
 def tear_down_proxy(self):
     if len(self.input.moxis) > 0:
         shell = RemoteMachineShellConnection(self.input.moxis[0])
         shell.stop_moxi()
         shell.disconnect()
Code Example #41
File: eventing_base.py Project: membase/testrunner
 def change_time_zone(self,server,timezone="UTC"):
     remote_client = RemoteMachineShellConnection(server)
     remote_client.execute_command("timedatectl set-timezone "+timezone)
     remote_client.disconnect()
Code Example #42
File: tuq_tokens.py Project: prasanna135/testrunner
    def test_tokens_secondary_indexes(self):
        server = self.master
        shell = RemoteMachineShellConnection(server)
        shell.execute_command(
            """curl -v -u Administrator:password \
                             -X POST http://{0}:8091/sampleBuckets/install \
                          -d '["{1}"]'""".format(
                server.ip, "beer-sample"
            )
        )
        self.sleep(30)

        shell.disconnect()
        # bucket_name = "beer-sample"
        self.query = "create primary index on `beer-sample`"
        self.run_cbq_query()
        self.query = "create index idx1 on `beer-sample`(description,name )"
        self.run_cbq_query()
        self.query = (
            "create index idx2 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description) END ,description,name )"
        )
        self.run_cbq_query()
        self.query = 'create index idx3 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"lower","names":true,"specials":false}) END ,description,name )'
        self.run_cbq_query()
        self.query = 'create index idx4 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"upper","names":false,"specials":true}) END ,description,name )'
        self.run_cbq_query()
        self.query = 'create index idx5 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"upper","names":false}) END ,description,name )'
        self.run_cbq_query()
        self.query = 'create index idx6 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"upper"}) END ,description,name )'
        self.run_cbq_query()
        self.query = "create index idx7 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{}) END ,description,name )"
        self.run_cbq_query()
        self.query = 'create index idx8 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"":""}) END ,description,name )'
        self.run_cbq_query()
        self.query = 'create index idx9 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"random"}) END ,description,name )'
        self.run_cbq_query()
        self.query = 'create index idx10 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"names":"random"}) END ,description,name )'
        self.run_cbq_query()
        self.query = 'create index idx11 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"specials":"random"}) END ,description,name )'
        self.run_cbq_query()
        self.query = "create index idx12 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description) END )"
        self.run_cbq_query()
        self.query = (
            'create index idx13 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"lower"}) END  )'
        )
        self.run_cbq_query()
        self.query = (
            'create index idx14 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"upper"}) END )'
        )
        self.run_cbq_query()
        self.query = 'create index idx15 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"lower","names":true,"specials":false}) END  )'
        self.run_cbq_query()
        self.query = 'create index idx16 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"upper","names":false,"specials":true}) END  )'
        self.run_cbq_query()
        self.query = 'create index idx17 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"upper","names":false}) END )'
        self.run_cbq_query()
        self.query = "create index idx18 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{}) END )"
        self.run_cbq_query()
        self.query = 'create index idx19 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"":""}) END  )'
        self.run_cbq_query()
        self.query = 'create index idx20 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"random"}) END  )'
        self.run_cbq_query()
        self.query = 'create index idx21 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"names":"random"}) END  )'
        self.run_cbq_query()
        self.query = 'create index idx22 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"specials":"random"}) END  )'
        self.run_cbq_query()

        self.query = 'explain select name from `beer-sample` where any v in tokens(description) satisfies v = "golden" END limit 10'
        actual_result = self.run_cbq_query()
        plan = ExplainPlanHelper(actual_result)

        self.assertTrue(actual_result["results"])
        self.assertTrue("covers" in str(plan))
        self.assertTrue(plan["~children"][0]["~children"][0]["scan"]["index"] == "idx2")

        self.assertTrue(
            str(plan["~children"][0]["~children"][0]["scan"]["covers"][0])
            == ("cover ((distinct (array `v` for `v` in tokens((`beer-sample`.`description`)) end)))")
        )

        self.query = 'select name from `beer-sample` use index(`#primary`) where any v in tokens(reverse(description)) satisfies v = "nedlog" END order by meta().id limit 10'
        expected_result = self.run_cbq_query()

        self.query = 'select name from `beer-sample` where any v in tokens(reverse(description)) satisfies v = "nedlog" END order by meta().id limit 10'
        actual_result = self.run_cbq_query()
        # self.assertTrue(str(actual_result['results'])=="[{u'name': u'21A IPA'}, {u'name': u'Amendment Pale Ale'}, {u'name': u'Double Trouble IPA'}, {u'name': u'South Park Blonde'}, {u'name': u'Restoration Pale Ale'}, {u'name': u'S.O.S'}, {u'name': u'Satsuma Harvest Wit'}, {u'name': u'Adnams Explorer'}, {u'name': u'Shock Top'}, {u'name': u'Anniversary Maibock'}]" )
        self.assertTrue((actual_result["results"]) == (expected_result["results"]))

        self.query = 'explain select name from `beer-sample` where any v in tokens(description,{"case":"lower","names":true,"specials":false}) satisfies v = "brewery" END limit 10'
        actual_result = self.run_cbq_query()
        plan = ExplainPlanHelper(actual_result)
        self.assertTrue("covers" in str(plan))
        self.assertTrue(
            str(plan["~children"][0]["~children"][0]["scan"]["covers"][0])
            == (
                'cover ((distinct (array `v` for `v` in tokens((`beer-sample`.`description`), {"case": "lower", "names": true, "specials": false}) end)))'
            )
        )
        self.assertTrue(plan["~children"][0]["~children"][0]["scan"]["index"] == "idx3")

        self.query = 'select name from `beer-sample` use index(`#primary`) where any v in tokens(description,{"case":"lower","names":true,"specials":false}) satisfies v = "brewery" END order by meta().id limit 10'

        expected_result = self.run_cbq_query()

        self.query = 'select name from `beer-sample` use index(`idx15`) where any v in tokens(description,{"case":"lower","names":true,"specials":false}) satisfies v = "brewery" END order by meta().id limit 10'
        actual_result = self.run_cbq_query()

        self.assertTrue((actual_result["results"]) == (expected_result["results"]))

        self.query = 'explain select name from `beer-sample` use index(`idx14`) where any v in tokens(description,{"case":"upper","names":false,"specials":true}) satisfies v = "BREWERY" END order by meta().id limit 10'
        actual_result = self.run_cbq_query()
        plan = ExplainPlanHelper(actual_result)
        self.assertTrue("covers" in str(plan))
        self.assertTrue(
            str(plan["~children"][0]["~children"][0]["scan"]["covers"][0])
            == (
                'cover ((distinct (array `v` for `v` in tokens((`beer-sample`.`description`), {"case": "upper", "names": false, "specials": true}) end)))'
            )
        )
        self.assertTrue(str(plan["~children"][0]["~children"][0]["scan"]["index"]) == "idx4")

        self.query = 'select name from `beer-sample` use index(`idx16`) where any v in tokens(description,{"case":"upper","names":false,"specials":true}) satisfies v = "BREWERY" END order by meta().id limit 10'
        actual_result = self.run_cbq_query()
        self.assertTrue((actual_result["results"]) == (expected_result["results"]))

        self.query = 'explain select name from `beer-sample` where any v in tokens(description,{"case":"upper","names":false}) satisfies v = "GOLDEN" END limit 10'
        actual_result = self.run_cbq_query()
        plan = ExplainPlanHelper(actual_result)
        self.assertTrue("covers" in str(plan))
        self.assertTrue(plan["~children"][0]["~children"][0]["scan"]["index"] == "idx5")

        self.query = 'select name from `beer-sample` use index(`idx17`) where any v in tokens(description,{"case":"upper","names":false}) satisfies v = "GOLDEN" END limit 10'
        actual_result = self.run_cbq_query()

        self.query = 'select name from `beer-sample` use index(`#primary`) where any v in tokens(description,{"case":"upper","names":false}) satisfies v = "GOLDEN" END limit 10'
        expected_result = self.run_cbq_query()
        self.assertTrue(actual_result["results"] == expected_result["results"])

        self.query = 'explain select name from `beer-sample` where any v in tokens(description,{}) satisfies  v = "golden" END limit 10'
        actual_result = self.run_cbq_query()
        plan = ExplainPlanHelper(actual_result)
        self.assertTrue("covers" in str(plan))
        self.assertTrue(plan["~children"][0]["~children"][0]["scan"]["index"] == "idx7")
        self.query = 'select name from `beer-sample` use index(`idx18`) where any v in tokens(description,{}) satisfies  v = "golden" END limit 10'
        actual_result = self.run_cbq_query()

        self.query = 'select name from `beer-sample` use index(`#primary`) where any v in tokens(description,{}) satisfies  v = "golden" END limit 10'
        expected_result = self.run_cbq_query()
        self.assertTrue(actual_result["results"] == expected_result["results"])

        self.query = 'explain select name from `beer-sample` where any v in tokens(description,{"":""}) satisfies v = "golden" END limit 10'
        actual_result = self.run_cbq_query()
        plan = ExplainPlanHelper(actual_result)
        self.assertTrue("covers" in str(plan))
        self.assertTrue(plan["~children"][0]["~children"][0]["scan"]["index"] == "idx8")

        self.query = 'select name from `beer-sample` use index(`idx19`)  where any v in tokens(description,{"":""}) satisfies v = "golden" END order by name '
        actual_result = self.run_cbq_query()
        self.query = 'select name from `beer-sample` use index(`#primary`)  where any v in tokens(description,{"":""}) satisfies v = "golden" END order by name '
        expected_result = self.run_cbq_query()
        self.assertTrue(actual_result["results"] == expected_result["results"])

        self.query = 'explain select name from `beer-sample` where any v in tokens(description,{"case":"random"}) satisfies  v = "golden"  END '
        actual_result = self.run_cbq_query()
        plan = ExplainPlanHelper(actual_result)
        self.assertTrue("covers" in str(plan))
        self.assertTrue(plan["~children"][0]["scan"]["index"] == "idx9")

        self.query = 'select name from `beer-sample` use index(`idx20`) where any v in tokens(description,{"case":"random"}) satisfies  v = "golden"  END order by name '
        actual_result = self.run_cbq_query()
        self.query = 'select name from `beer-sample` use index(`#primary`) where any v in tokens(description,{"case":"random"}) satisfies  v = "golden"  END  order by name '
        expected_result = self.run_cbq_query()
        self.assertTrue(actual_result["results"] == expected_result["results"])

        self.query = 'explain select name from `beer-sample` where any v in tokens(description,{"specials":"random"}) satisfies v = "brewery" END order by name'
        actual_result = self.run_cbq_query()
        plan = ExplainPlanHelper(actual_result)
        self.assertTrue("covers" in str(plan))
        self.assertTrue(plan["~children"][0]["~children"][0]["scan"]["index"] == "idx11")

        self.query = 'select name from `beer-sample` use index(`idx22`) where any v in tokens(description,{"specials":"random"}) satisfies  v = "golden"  END  order by name'
        actual_result = self.run_cbq_query()
        self.query = 'select name from `beer-sample` use index(`#primary`) where any v in tokens(description,{"specials":"random"}) satisfies  v = "golden"  END order by name'
        expected_result = self.run_cbq_query()
        self.assertTrue(actual_result["results"] == expected_result["results"])

        self.query = 'explain select name from `beer-sample` where any v in tokens(description,{"names":"random"}) satisfies v = "brewery" END limit 10'
        actual_result = self.run_cbq_query()
        plan = ExplainPlanHelper(actual_result)
        self.assertTrue("covers" in str(plan))
        self.assertTrue(plan["~children"][0]["~children"][0]["scan"]["index"] == "idx10")

        self.query = 'select name from `beer-sample` use index(`idx21`) where any v in tokens(description,{"names":"random"}) satisfies  v = "golden"  END  limit 10'
        actual_result = self.run_cbq_query()
        self.query = 'select name from `beer-sample` use index(`#primary`) where any v in tokens(description,{"names":"random"}) satisfies  v = "golden"  END limit 10'
        expected_result = self.run_cbq_query()
        self.assertTrue(actual_result["results"] == expected_result["results"])
Code example #43
 def kill_producer(self, server):
     remote_client = RemoteMachineShellConnection(server)
     remote_client.kill_eventing_process(name="eventing-producer")
     remote_client.disconnect()
Code example #44
 def reset_firewall(self,server):
     shell = RemoteMachineShellConnection(server)
     shell.info = shell.extract_remote_info()
     o, r = shell.execute_command("/sbin/iptables --flush")
     shell.log_command_output(o, r)
     shell.disconnect()
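
reset_firewall simply flushes every iptables rule. In tests that first introduce a network partition, the usual counterpart is a rule that rejects traffic on a port before the flush restores it. A hedged sketch of that pair, with the data port and rule purely illustrative and the import path assumed to be the standard testrunner one:

from remote.remote_util import RemoteMachineShellConnection  # assumed import path


def block_port(server, port=11210):
    """Reject inbound TCP traffic to `port` on `server` (illustrative only)."""
    shell = RemoteMachineShellConnection(server)
    o, r = shell.execute_command(
        "/sbin/iptables -A INPUT -p tcp --dport %s -j REJECT" % port)
    shell.log_command_output(o, r)
    shell.disconnect()


def unblock_all(server):
    """Undo block_port by flushing all rules, exactly as reset_firewall does."""
    shell = RemoteMachineShellConnection(server)
    o, r = shell.execute_command("/sbin/iptables --flush")
    shell.log_command_output(o, r)
    shell.disconnect()
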
Code example #45
 def tear_down_proxy(self):
     if len(self.input.moxis) > 0:
         shell = RemoteMachineShellConnection(self.input.moxis[0])
         shell.stop_moxi()
         shell.disconnect()
Code example #46
    def test_items_append(self):
        self.desired_item_size = self.input.param("desired_item_size", 2048)
        self.append_size = self.input.param("append_size", 1024)
        self.fixed_append_size = self.input.param("fixed_append_size", True)
        self.append_ratio = self.input.param("append_ratio", 0.5)
        self._load_all_buckets(self.master,
                               self.gen_create,
                               "create",
                               0,
                               batch_size=1000,
                               pause_secs=5,
                               timeout_secs=100)

        for bucket in self.buckets:
            self.value_size = self.input.param("value_size", 512)
            verify_dict = {}
            vkeys, dkeys = bucket.kvs[1].key_set()

            key_count = len(vkeys)
            app_ratio = self.append_ratio * key_count
            selected_keys = []
            i = 0
            for key in vkeys:
                i += 1
                if i >= app_ratio:
                    break
                selected_keys.append(key)

            awareness = VBucketAwareMemcached(RestConnection(self.master),
                                              bucket.name)
            if self.kv_verify:
                for key in selected_keys:
                    value = awareness.memcached(key).get(key)[2]
                    verify_dict[key] = value

            self.log.info("Bucket: {0}".format(bucket.name))
            self.log.info("Appending to have items whose initial size was " +
                          "{0} to equal or cross a size of {1}".format(
                              self.value_size, self.desired_item_size))
            self.log.info("Item-appending of {0} items starting ..".format(
                len(selected_keys) + 1))

            index = 3
            while self.value_size < self.desired_item_size:
                str_len = self.append_size
                if not self.fixed_append_size:
                    str_len = int(math.pow(2, index))

                for key in selected_keys:
                    random_string = self.random_str_generator(str_len)
                    awareness.memcached(key).append(key, random_string)
                    if self.kv_verify:
                        verify_dict[key] = verify_dict[key] + random_string
                self.log.info(
                    "for {0} items size was increased to {1} Bytes".format(
                        len(selected_keys) + 1, self.value_size))
                self.value_size += str_len
                index += 1

            self.log.info("The appending of {0} items ended".format(
                len(selected_keys) + 1))

        for bucket in self.buckets:
            msg = "Bucket:{0}".format(bucket.name)
            self.log.info("VERIFICATION <" + msg +
                          ">: Phase 0 - Check the gap between " +
                          "mem_used by the bucket and total_allocated_bytes")
            stats = StatsCommon()
            mem_used_stats = stats.get_stats(self.servers, bucket, 'memory',
                                             'mem_used')
            total_allocated_bytes_stats = stats.get_stats(
                self.servers, bucket, 'memory', 'total_allocated_bytes')
            total_fragmentation_bytes_stats = stats.get_stats(
                self.servers, bucket, 'memory', 'total_fragmentation_bytes')

            for server in self.servers:
                self.log.info(
                    "In {0} bucket {1}, total_fragmentation_bytes + the total_allocated_bytes = {2}"
                    .format(server.ip, bucket.name,
                            (int(total_fragmentation_bytes_stats[server]) +
                             int(total_allocated_bytes_stats[server]))))
                self.log.info("In {0} bucket {1}, mem_used = {2}".format(
                    server.ip, bucket.name, mem_used_stats[server]))
                self.log.info(
                    "In {0} bucket {1}, the difference between actual memory used by memcached and mem_used is {2} times"
                    .format(
                        server.ip, bucket.name,
                        float(
                            int(total_fragmentation_bytes_stats[server]) +
                            int(total_allocated_bytes_stats[server])) /
                        float(mem_used_stats[server])))

            self.log.info(
                "VERIFICATION <" + msg + ">: Phase1 - Check if any of the " +
                "selected keys have value less than the desired value size")
            for key in selected_keys:
                value = awareness.memcached(key).get(key)[2]
                if len(value) < self.desired_item_size:
                    self.fail(
                        "Failed to append enough to make value size surpass the "
                        + "size {0}, key {1} has size {2}".format(
                            self.desired_item_size, key, len(value)))

            if self.kv_verify:
                self.log.info("VERIFICATION <" + msg +
                              ">: Phase2 - Check if the content " +
                              "after the appends match what's expected")
                for k in verify_dict:
                    if awareness.memcached(k).get(k)[2] != verify_dict[k]:
                        self.fail(
                            "Content at key {0}: not what's expected.".format(
                                k))
                self.log.info("VERIFICATION <" + msg + ">: Successful")

        shell = RemoteMachineShellConnection(self.master)
        shell.execute_cbstats("", "raw", keyname="allocator", vbid="")
        shell.disconnect()
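
Phase 0 above only logs the relationship between mem_used and the allocator stats; it never asserts on it. The quantity being logged is a ratio, sketched here as a standalone function in case a threshold check were wanted (the 1.1 bound in the comment is an illustrative assumption, not a value from the test):

def fragmentation_ratio(mem_used, total_allocated_bytes,
                        total_fragmentation_bytes):
    """Ratio of memory actually held by the allocator to mem_used,
    mirroring the expression logged in Phase 0."""
    actual = int(total_allocated_bytes) + int(total_fragmentation_bytes)
    return float(actual) / float(mem_used)


# Example threshold check (hypothetical):
# assert fragmentation_ratio(mem_used, allocated, fragmented) <= 1.1
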
Code example #47
File: testssl.py  Project: couchbase/testrunner
    def test_tls_ciphers_used(self):
        """
        Checks cipher-suites used is a subset of preconfigured list of cipher-suites.
        Checks for TLS 1.2 and TLS 1.3
        """
        for node in self.servers:
            self.log.info("Testing node {0}".format(node.ip))
            ports_to_scan = self.get_service_ports(node)
            ports_to_scan.extend(self.ports_to_scan)
            for node_port in ports_to_scan:
                self.log.info("Port being tested: {0}".format(node_port))
                cmd = self.testssl.TEST_SSL_FILENAME + " --warnings off --color 0 {0}:{1}" \
                    .format(node.ip, node_port)
                self.log.info("The command is {0}".format(cmd))
                shell = RemoteMachineShellConnection(self.slave_host)
                output, error = shell.execute_command(cmd)
                shell.disconnect()
                output = output.decode().split("\n")
                check_next = 0
                stmt = ""
                tls_1_dot_2_obtained_list = []
                tls_1_dot_3_obtained_list = []
                for line in output:
                    if check_next == 1:
                        if line == "":
                            check_next = 0
                            stmt = ""
                        elif "TLSv1.3 (" in line:
                            stmt = "TLSv1.3 ("
                        elif stmt == "TLSv1.2 (":
                            tls_1_dot_2_obtained_list.append(line.split()[-1])
                        elif stmt == "TLSv1.3 (":
                            tls_1_dot_3_obtained_list.append(line.split()[-1])
                    elif "TLSv1.2 (" in line:
                        check_next = 1
                        stmt = "TLSv1.2 ("

                # Get the preconfigured list of cipher-suites
                shell = RemoteMachineShellConnection(self.master)
                output, error = shell.execute_couchbase_cli(cli_command="setting-security",
                                                            options="--get",
                                                            cluster_host="localhost",
                                                            user="******",
                                                            password="******")
                shell.disconnect()
                content = json.loads(output[0])
                services_ports_map = {11207: "data", 18094: "fullTextSearch", 19102: "index",
                                      18096: "eventing", 18093: "query", 18095: "analytics",
                                      18097: "backup", 18091: "clusterManager",
                                      18092: "clusterManager"}
                cipher_order_list = content[services_ports_map[node_port]]["supportedCipherSuites"]

                # Verifies TLS 1.2 cipher-suites is a subset of preconfigured list of
                # cipher-suites
                is_present = False
                if all(ciphers in cipher_order_list for ciphers in tls_1_dot_2_obtained_list):
                    is_present = True
                self.assertTrue(is_present, msg="Obtained list of TLS 1.2 cipher-suites is not a "
                                                "subset of pre-configured list of cipher-suites on "
                                                "port: {0} :: service: {1}"
                                .format(node_port, services_ports_map[node_port]))

                # Verifies TLS 1.3 cipher-suites is a subset of preconfigured list of
                # cipher-suites
                is_present = False
                if all(ciphers in cipher_order_list for ciphers in tls_1_dot_3_obtained_list):
                    is_present = True
                self.assertTrue(is_present, msg="Obtained list of TLS 1.3 cipher-suites is not a "
                                                "subset of pre-configured list of cipher-suites on "
                                                "port: {0} :: service: {1}"
                                .format(node_port, services_ports_map[node_port]))
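
The subset check at the end of the test reduces to "every observed cipher-suite is in the preconfigured list". A standalone sketch that also reports the offending suites, which makes the assertion message more actionable (hypothetical helper, not part of testssl.py):

def unexpected_ciphers(obtained_list, configured_list):
    """Return the cipher-suites seen on the wire that are missing from the
    cluster's preconfigured supportedCipherSuites list."""
    return sorted(set(obtained_list) - set(configured_list))


# Possible usage inside the test:
# extra = unexpected_ciphers(tls_1_dot_2_obtained_list, cipher_order_list)
# self.assertFalse(extra, msg="Unexpected TLS 1.2 ciphers: %s" % extra)
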
Code example #48
File: testssl.py  Project: couchbase/testrunner
    def test_port_security(self):
        """
        Scanning the ports to test vulnerabilities
        """
        for node in self.servers:
            self.log.info("Testing node {0}".format(node.ip))
            ports_to_scan = self.get_service_ports(node)
            ports_to_scan.extend(self.ports_to_scan)
            for node_port in ports_to_scan:
                self.log.info("Port being tested: {0}".format(node_port))
                cmd = self.testssl.TEST_SSL_FILENAME + " --warnings off --color 0 {0}:{1}" \
                    .format(node.ip, node_port)
                self.log.info("The command is {0}".format(cmd))
                shell = RemoteMachineShellConnection(self.slave_host)
                output, error = shell.execute_command(cmd)
                shell.disconnect()
                output = output.decode().split("\n")
                check_next = 0
                stmt = ""
                scan_count = 0
                for line in output:
                    if check_next == 1:
                        if stmt == "Certificate Validity":
                            if ">= 10 years is way too long" in line:
                                self.log.info(">= 10 years is way too long. (Fix moved to "
                                              "Morpheus MB-50249)".format(node_port))
                            check_next = 0
                            stmt = ""

                    # Testing Protocols
                    elif "SSLv2  " in line or "SSLv3  " in line:
                        scan_count = scan_count + 1
                        if "offered (NOT ok)" in line:
                            self.fail("SSLvx is offered on port {0}".format(node_port))

                    # Testing Cipher Categories
                    elif "LOW: 64 Bit + DES, RC[2,4]" in line:
                        scan_count = scan_count + 1
                        if "offered (NOT ok)" in line:
                            self.fail("Cipher is not ok on port {0}".format(node_port))

                    # Testing Server's Cipher Preferences
                    elif "Has server cipher order?" in line:
                        scan_count = scan_count + 1
                        if "no" in line:
                            self.fail("Server cipher ordering not set".format(node_port))

                    # Testing Server Defaults
                    elif "Certificate Validity" in line:
                        scan_count = scan_count + 1
                        check_next = 1
                        stmt = "Certificate Validity"

                    # Testing Vulnerabilities
                    elif "Heartbleed (CVE-2014-0160)" in line:
                        scan_count = scan_count + 1
                        if "VULNERABLE (NOT ok)" in line:
                            self.fail("Heartbleed vulnerability on port {0}".format(node_port))
                    elif "ROBOT" in line:
                        scan_count = scan_count + 1
                        if "VULNERABLE (NOT ok)" in line:
                            self.fail("Robot vulnerability on port {0}".format(node_port))
                    elif "Secure Client-Initiated Renegotiation" in line:
                        scan_count = scan_count + 1
                        if "VULNERABLE (NOT ok)" in line:
                            self.fail("Renegotiation vulnerability on port {0}"
                                      .format(node_port))
                    elif "CRIME, TLS" in line:
                        scan_count = scan_count + 1
                        if "VULNERABLE (NOT ok)" in line:
                            self.fail("Crime vulnerability on port {0}".format(node_port))
                    elif "POODLE, SSL (CVE-2014-3566)" in line:
                        scan_count = scan_count + 1
                        if "VULNERABLE (NOT ok)" in line:
                            self.fail("Poodle vulnerability on port {0}".format(node_port))
                    elif "SWEET32" in line:
                        scan_count = scan_count + 1
                        if "VULNERABLE (NOT ok)" in line:
                            self.fail("Sweet32 vulnerability on port {0}".format(node_port))
                    elif "LOGJAM (CVE-2015-4000)" in line:
                        scan_count = scan_count + 1
                        if "VULNERABLE (NOT ok)" in line:
                            self.fail("LogJam vulnerability on port {0}".format(node_port))
                    elif "LUCKY13 (CVE-2013-0169)" in line:
                        scan_count = scan_count + 1
                        if "potentially VULNERABLE" in line:
                            self.fail("Lucky13 vulnerability on port {0}".format(node_port))
                    elif "RC4 (CVE-2013-2566, CVE-2015-2808)" in line:
                        scan_count = scan_count + 1
                        if "VULNERABLE (NOT ok)" in line:
                            self.fail("RC4 ciphers detected on port {0}".format(node_port))

                    elif "Medium grade encryption" in line:
                        scan_count = scan_count + 1
                        if "not offered (OK)" not in line:
                            self.fail("Medium grade encryption is offered on port {0}"
                                      .format(node_port))
                self.assertTrue(scan_count == 14,
                                msg="Test didn't complete all the scans for port {0}"
                                .format(node_port))
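
test_port_security is one long elif ladder over the testssl output. The simple single-line checks can be expressed as a table of (marker, failure substring) pairs, which also makes the expected scan count explicit; a hedged sketch of that refactoring is below (it deliberately leaves out the multi-line Certificate Validity case and the protocol/cipher-category checks, which need extra context):

# (substring that identifies the finding, substring that indicates a failure)
VULN_CHECKS = [
    ("Heartbleed (CVE-2014-0160)", "VULNERABLE (NOT ok)"),
    ("ROBOT", "VULNERABLE (NOT ok)"),
    ("Secure Client-Initiated Renegotiation", "VULNERABLE (NOT ok)"),
    ("CRIME, TLS", "VULNERABLE (NOT ok)"),
    ("POODLE, SSL (CVE-2014-3566)", "VULNERABLE (NOT ok)"),
    ("SWEET32", "VULNERABLE (NOT ok)"),
    ("LOGJAM (CVE-2015-4000)", "VULNERABLE (NOT ok)"),
    ("LUCKY13 (CVE-2013-0169)", "potentially VULNERABLE"),
    ("RC4 (CVE-2013-2566, CVE-2015-2808)", "VULNERABLE (NOT ok)"),
]


def scan_findings(output_lines):
    """Return (marker, line) pairs for every vulnerable finding in the
    testssl output; an empty list means the scanned port looks clean."""
    findings = []
    for line in output_lines:
        for marker, bad in VULN_CHECKS:
            if marker in line and bad in line:
                findings.append((marker, line.strip()))
    return findings
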
Code example #49
File: eventing_base.py  Project: membase/testrunner
 def kill_producer(self, server):
     remote_client = RemoteMachineShellConnection(server)
     remote_client.kill_eventing_process(name="eventing-producer")
     remote_client.disconnect()
Code example #50
File: multiple_CA.py  Project: couchbase/testrunner
    def test_cluster_works_fine_after_deleting_CA_folder(self):
        """
        1. Init node cluster. Generate x509 certs
        2. Upload root certs from any random node of the cluster
        3. Delete CA folder from that node
        4. Verify that cluster continues to operate fine by checking
            a) Failover & delta recovery of that node
            b) Failover & rebalance-out of that node
            c) Client authentication & sdk writes
        """
        self.x509.generate_multiple_x509_certs(
            servers=self.servers[:self.nodes_init])
        random_nodes = random.sample(self.servers[1:self.nodes_init], 1)
        self.log.info("Uploading root certs from {0}".format(random_nodes[0]))
        self.x509.upload_root_certs(random_nodes[0])
        self.x509.upload_node_certs(servers=self.servers[:self.nodes_init])
        self.x509.delete_unused_out_of_the_box_CAs(server=self.master)
        self.x509.upload_client_cert_settings(server=self.master)
        shell = RemoteMachineShellConnection(random_nodes[0])
        shell.remove_directory(self.x509.install_path +
                               x509main.CHAINFILEPATH + "/" +
                               x509main.TRUSTEDCAPATH)
        shell.disconnect()

        failover_nodes = random_nodes
        nodes_in_cluster = self.servers[:self.nodes_init]
        for operation in ["recovery", "out"]:
            shell = RemoteMachineShellConnection(failover_nodes[0])
            shell.stop_server()
            self.cluster.async_failover(self.servers[:self.nodes_init],
                                        failover_nodes,
                                        graceful=False)
            self.wait_for_failover_or_assert(1)
            if operation == "out":
                https_val = CbServer.use_https  # so that add_node uses https
                CbServer.use_https = True
                rest = RestConnection(self.master)
                otp_nodes = []
                ejected_nodes = []
                for node in nodes_in_cluster:
                    otp_nodes.append('ns_1@' + node.ip)
                for node in failover_nodes:
                    ejected_nodes.append('ns_1@' + node.ip)
                status = rest.rebalance(otpNodes=otp_nodes,
                                        ejectedNodes=ejected_nodes)
                if not status:
                    shell.start_server(failover_nodes[0])
                    self.fail("rebalance/failover failed")
                CbServer.use_https = https_val
                nodes_in_cluster.remove(failover_nodes[0])  # list.remove() mutates in place and returns None
            shell.start_server(failover_nodes[0])
            if operation == "recovery":
                rest = RestConnection(self.master)
                for node in failover_nodes:
                    rest.set_recovery_type("ns_1@" + node.ip,
                                           recoveryType="delta")
                https_val = CbServer.use_https  # so that add_node uses https
                CbServer.use_https = True
                task = self.cluster.async_rebalance(
                    self.servers[:self.nodes_init], [], [])
                self.wait_for_rebalance_to_complete(task)
                CbServer.use_https = https_val
        self.auth(servers=nodes_in_cluster)
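
The rebalance-out branch above builds its otpNodes and ejectedNodes lists by prefixing each node IP with "ns_1@". That construction is worth a tiny helper, sketched here under the assumption (taken from the test itself) that the otp name is always "ns_1@<ip>":

def otp_names(nodes):
    """Map server objects to their otp node names, e.g. 'ns_1@10.1.2.3'."""
    return ['ns_1@' + node.ip for node in nodes]


# Usage mirroring the test:
# status = rest.rebalance(otpNodes=otp_names(nodes_in_cluster),
#                         ejectedNodes=otp_names(failover_nodes))
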
Code example #51
 def change_time_zone(self, server, timezone="UTC"):
     remote_client = RemoteMachineShellConnection(server)
     remote_client.execute_command("timedatectl set-timezone " + timezone)
     remote_client.disconnect()
Code example #52
File: nru_moninor.py  Project: Boggypop/testrunner
class NRUMonitor(threading.Thread):

    CMD_NUM_RUNS = "/opt/couchbase/bin/cbstats localhost:11210 "\
                   "all | grep ep_num_access_scanner_runs"

    CMD_NUM_ITEMS = "/opt/couchbase/bin/cbstats localhost:11210 "\
                    "all | grep ep_access_scanner_num_items"

    CMD_RUNTIME = "/opt/couchbase/bin/cbstats localhost:11210 "\
                  "all | grep ep_access_scanner_last_runtime"

    def __init__(self, freq, reb_delay, eperf):
        self.freq = freq
        self.reb_delay = reb_delay
        self.eperf = eperf
        self.shell = None
        super(NRUMonitor, self).__init__()

    def run(self):
        print "[NRUMonitor] started running"

        # TODO: evaluate all servers, smarter polling freq
        server = self.eperf.input.servers[0]
        self.shell = RemoteMachineShellConnection(server)

        nru_num = self.nru_num = self.get_nru_num()
        if self.nru_num < 0:
            return

        while nru_num <= self.nru_num:
            print "[NRUMonitor] nru_num = %d, sleep for %d seconds"\
                  % (nru_num, self.freq)
            time.sleep(self.freq)
            nru_num = self.get_nru_num()
            if nru_num < 0:
                break

        gmt_now = time.strftime(PerfDefaults.strftime, time.gmtime())
        speed, num_items, run_time = self.get_nru_speed()

        print "[NRUMonitor] access scanner finished at: %s, speed: %s, "\
              "num_items: %s, run_time: %s"\
              % (gmt_now, speed, num_items, run_time)

        self.eperf.clear_hot_keys()

        print "[NRUMonitor] scheduled rebalance after %d seconds"\
              % self.reb_delay

        self.shell.disconnect()
        self.eperf.latched_rebalance(delay=self.reb_delay, sync=True)

        gmt_now = time.strftime(PerfDefaults.strftime, time.gmtime())
        print "[NRUMonitor] rebalance finished: %s" % gmt_now

        print "[NRUMonitor] stopped running"

    def get_nru_num(self):
        """Retrieve how many times nru access scanner has been run"""
        return self._get_shell_int(NRUMonitor.CMD_NUM_RUNS)

    def get_nru_speed(self):
        """Retrieve runtime and num_items for the last access scanner run
        Calculate access running speed

        @return (speed, num_items, run_time)
        """
        num_items = self._get_shell_int(NRUMonitor.CMD_NUM_ITEMS)

        if num_items <= 0:
            return -1, -1, -1

        run_time = self._get_shell_int(NRUMonitor.CMD_RUNTIME)

        if run_time <= 0:
            return -1, num_items, -1

        speed = num_items / run_time

        return speed, num_items, run_time

    def _get_shell_int(self, cmd):
        """Fire a shell command and return output as integer"""
        if not cmd:
            print "<_get_shell_int> invalid cmd"
            return -1

        output, error = self.shell.execute_command(cmd)

        if error:
            print "<_get_shell_int> unable to execute cmd '%s' from %s: %s"\
                  % (cmd, self.shell.ip, error)
            return -1

        if not output:
            print "<_get_shell_int> unable to execute cmd '%s' from %s: "\
                  "empty output" % (cmd, self.shell.ip)
            return -1

        try:
            num = int(output[0].split(":")[1])
        except (AttributeError, IndexError, ValueError), e:
            print "<_get_shell_int> unable to execute cmd '%s' from %s:"\
                  "output - %s, error - %s" % (cmd, self.shell.ip, output, e)
            return -1

        if num < 0:
            print "<_get_shell_int> invalid number: %d" % num
            return -1

        return num
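
_get_shell_int assumes cbstats prints lines such as "ep_num_access_scanner_runs: 3" and takes everything after the first colon. That parsing step is easy to unit-test without a live shell; a self-contained sketch (keeping the same defensive behaviour of returning -1 on any malformed input):

def parse_cbstats_int(output_lines):
    """Parse the integer value from the first 'key: value' line of cbstats
    output; return -1 on malformed or negative input, as _get_shell_int does."""
    if not output_lines:
        return -1
    try:
        value = int(output_lines[0].split(":")[1])
    except (AttributeError, IndexError, ValueError):
        return -1
    return value if value >= 0 else -1


# parse_cbstats_int([" ep_num_access_scanner_runs:        3"]) returns 3
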
Code example #53
File: hostnameTests.py  Project: umang-cb/Jython
class HostnameTests(BaseTestCase):
    def setUp(self):
        super(HostnameTests, self).setUp()
        self.builds = self.input.param("builds", "2.2.0")
        self.product = self.input.param('product', 'couchbase-server')
        self.is_negative_test = self.input.param('is_negative_test', False)
        self.error = self.input.param('error', '')
        self.name = self.input.param('name', '')

    def tearDown(self):
        super(HostnameTests, self).tearDown()
        for server in self.servers:
            self.assertTrue(
                RestHelper(RestConnection(server)).is_ns_server_running(
                    timeout_in_seconds=480),
                msg="ns server is not running even after waiting for 6 minutes"
            )
        self.log.info(
            "sleep for 10 seconds to give enough time for other nodes to restart"
        )
        self.sleep(10)

    def install_builds(self, builds, servers=None):
        params = {}
        params['product'] = 'couchbase-server'
        builds_list = builds.split(";")
        st = 0
        end = 1
        if None in servers:
            servers_to_install = self.servers
        else:
            servers_to_install = servers
        for i in builds_list:
            params['version'] = i
            InstallerJob().sequential_install(servers_to_install[st:end],
                                              params)
            st = st + 1
            end = end + 1
        self.sleep(20)
        super(HostnameTests, self).setUp()

    def rest_api_addNode(self):
        hostnames = self.convert_to_hostname(self, self.servers[0:2])
        master_rest = RestConnection(self.master)
        master_rest.add_node(self.servers[1].rest_username,
                             self.servers[1].rest_password, hostnames[1],
                             self.servers[1].port)
        #Now check whether the node which we added is still referred via hostname or not.
        obj = RestConnection(self.servers[1])
        var = obj.get_nodes_self().hostname
        flag = True if self.servers[1].ip in var else False
        self.assertEqual(
            flag,
            False,
            msg=
            "Fail - Name of node {0} got converted to IP. Failing the test!!!".
            format(self.servers[1].ip))
        self.log.info("Test Passed!!")
        self.sleep(10)

#Cases 39 and 40 from hostname management test plan combined.

    def rest_api_renameNode(self):
        try:
            self.shell = RemoteMachineShellConnection(self.master)
            #com_inst_build = "cat /opt/couchbase/VERSION.txt"
            #out = self.shell.execute_command(com_inst_build.format(com_inst_build))
            self.install_builds(self.builds, self.servers[0:1])
            if self.is_negative_test:
                master_rest = RestConnection(self.master)
                self.log.info("Renaming node {0} to {1}".format(
                    self.master, self.name))
                var = master_rest.rename_node(
                    username=self.master.rest_username,
                    password=self.master.rest_password,
                    port=self.master.port,
                    hostname=self.name,
                    is_negative_test=True)
                out = var.pop()
                self.assertEqual(
                    out,
                    self.error,
                    msg=
                    "Fail to find correct error. The error should be {0}, but we got : {1}"
                    .format(self.error, out))
                self.log.info(
                    "Got correct error - {0}....Passing the test".format(out))
            else:
                self.log.info(
                    "Node {0} is referred via IP. Changing the name of the node"
                    .format(self.servers[0:1]))
                hostname = []
                info = self.shell.extract_remote_info()
                domain = ''.join(info.domain[0])
                hostname.append(info.hostname[0] + "." + domain)
                self.convert_to_hostname(self, self.servers[0:1])
                self.log.info(
                    "Calling get_node_self() to check the status of node {0}".
                    format(self.servers[0:1]))
                obj = RestConnection(self.master)
                var = obj.get_nodes_self().hostname
                flag = True if self.master.ip in var else False
                self.assertEqual(
                    flag,
                    False,
                    msg="Fail - Node {0} is still referred via IP. Should\
                                     have been referred via hostname. Failing the test!"
                    .format(self.master.ip))
                self.log.info(
                    "Name of node {0} got converted to hostname. Proceeding......!"
                    .format(self.master.ip))
                self.sleep(10)
                self.log.info(
                    "Now changing name of node {0} from hostname to IP".format(
                        self.master.ip))
                var = obj.rename_node(username='******',
                                      password='******',
                                      port='',
                                      hostname=self.master.ip)
                self.log.info(
                    "Calling get_node_self() to check the status of the node {0}"
                    .format(self.master.ip))
                var = obj.get_nodes_self().hostname
                flag = True if self.master.ip in var else False
                self.assertEqual(
                    flag,
                    True,
                    msg=
                    "Fail - Node {0} is still referred via hostname. Should have been referred via IP. Failing the test!"
                    .format(self.master.ip))
                self.log.info("Node {0} referred via IP. Pass !".format(
                    self.master.ip))
        finally:
            self.shell.disconnect()

    @staticmethod
    def convert_to_hostname(self,
                            servers_with_hostnames,
                            username='******',
                            password='******'):
        try:
            hostname = []
            for server in servers_with_hostnames:
                shell = RemoteMachineShellConnection(server)
                info = shell.extract_remote_info()
                domain = ''.join(info.domain[0])
                if not domain:
                    output = shell.execute_command_raw('nslookup %s' %
                                                       info.hostname[0])
                    print output
                    self.fail(
                        "Domain is not defined, couchbase cannot be configured correctly. NOT A BUG. CONFIGURATION ISSUE"
                    )
                hostname.append(info.hostname[0] + "." + domain)
                master_rest = RestConnection(server)
                current_hostname = master_rest.get_nodes_self().hostname
                self.log.info("get_node_self function returned : {0}".format(
                    current_hostname))
                if server.ip in current_hostname:
                    self.log.info(
                        "Node {0} is referred via IP. Need to be referred with hostname. Changing the name of the node!!"
                        .format(server.ip))
                    version = RestConnection(server).get_nodes_self().version
                    if version.startswith("1.8.1") or version.startswith(
                            "2.0.0") or version.startswith("2.0.1"):
                        RemoteUtilHelper.use_hostname_for_server_settings(
                            server)
                        master_rest.init_cluster()
                    else:
                        master_rest.init_cluster()
                        master_rest.rename_node(username=username,
                                                password=password,
                                                port='',
                                                hostname=hostname[-1])
                else:
                    self.log.info(
                        "Node {0} already referred via hostname. No need to convert the name"
                        .format(server.ip))
        finally:
            shell.disconnect()
        return hostname
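
convert_to_hostname derives the FQDN from the hostname and domain fields that extract_remote_info() reports, and treats an empty domain as a configuration error rather than a bug. The join itself is trivial and is sketched below as a pure function (illustrative only, not a replacement for the remote lookup):

def build_fqdn(short_hostname, domain):
    """Join hostname and domain the way convert_to_hostname does."""
    if not domain:
        # The test treats this as a configuration issue and fails outright.
        raise ValueError("Domain is not defined for %s" % short_hostname)
    return short_hostname + "." + domain


# build_fqdn("node1", "example.com") returns "node1.example.com"
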
Code example #54
 def kill_memcached_service(self, server):
     remote_client = RemoteMachineShellConnection(server)
     remote_client.kill_memcached()
     remote_client.disconnect()
Code example #55
File: eventing_base.py  Project: membase/testrunner
 def kill_memcached_service(self, server):
     remote_client = RemoteMachineShellConnection(server)
     remote_client.kill_memcached()
     remote_client.disconnect()
Code example #56
    def test_xdcr_with_security(self):
        #Settings
        self.settings_values_map = {
            "autofailover": ["enable", None],
            "n2n": ["enable", "disable"],
            "tls": ["all", "control", "strict"]
        }

        self.apply_settings_before_setup = self._input.param(
            "apply_settings_before_setup", False)
        self.disable_autofailover = self._input.param("disable_autofailover",
                                                      False)
        self.enable_n2n = self._input.param("enable_n2n", False)
        self.enforce_tls = self._input.param("enforce_tls", None)
        self.tls_level = self._input.param("tls_level", "control")
        self.enable_autofailover = self._input.param("enable_autofailover",
                                                     False)
        self.disable_n2n = self._input.param("disable_n2n", None)
        self.disable_tls = self._input.param("disable_tls", None)

        rebalance_in = self._input.param("rebalance_in", None)
        rebalance_out = self._input.param("rebalance_out", None)
        swap_rebalance = self._input.param("swap_rebalance", None)
        failover = self._input.param("failover", None)
        graceful = self._input.param("graceful", None)
        pause = self._input.param("pause", None)
        reboot = self._input.param("reboot", None)
        initial_xdcr = self._input.param("initial_xdcr",
                                         random.choice([True, False]))
        random_setting = self._input.param("random_setting", False)
        multiple_ca = self._input.param("multiple_ca", None)
        use_client_certs = self._input.param("use_client_certs", None)
        int_ca_name = self._input.param("int_ca_name", "iclient1_clientroot")
        all_node_upload = self._input.param("all_node_upload", False)
        rotate_certs = self._input.param("rotate_certs", None)
        delete_certs = self._input.param("delete_certs", None)
        restart_pkey_nodes = self._input.param("restart_pkey_nodes", None)

        if not self.apply_settings_before_setup:
            if initial_xdcr:
                self.load_and_setup_xdcr()
            else:
                self.setup_xdcr_and_load()

        if self.enforce_tls:
            for cluster in self.get_cluster_objects_for_input(
                    self.enforce_tls):
                if self.tls_level == "rotate":
                    for level in self.settings_values_map["tls"]:
                        cluster.toggle_security_setting(
                            [cluster.get_master_node()], "tls", level)
                        time.sleep(5)
                else:
                    cluster.toggle_security_setting(
                        [cluster.get_master_node()], "tls", self.tls_level)

        #Revert to default (control) tls level
        if self.disable_tls:
            for cluster in self.get_cluster_objects_for_input(
                    self.disable_tls):
                cluster.toggle_security_setting([cluster.get_master_node()],
                                                "tls")

        if self.enable_n2n:
            for cluster in self.get_cluster_objects_for_input(self.enable_n2n):
                cluster.toggle_security_setting([cluster.get_master_node()],
                                                "n2n", "enable")

        if self.disable_n2n:
            for cluster in self.get_cluster_objects_for_input(
                    self.disable_n2n):
                cluster.toggle_security_setting([cluster.get_master_node()],
                                                "n2n")

        if self.enable_autofailover:
            for cluster in self.get_cluster_objects_for_input(
                    self.enable_autofailover):
                cluster.toggle_security_setting([cluster.get_master_node()],
                                                "autofailover", "enable")

        if self.disable_autofailover:
            for cluster in self.get_cluster_objects_for_input(
                    self.disable_autofailover):
                cluster.toggle_security_setting([cluster.get_master_node()],
                                                "autofailover")

        if random_setting:
            for cluster in self.get_cluster_objects_for_input(random_setting):
                setting = random.choice(list(self.settings_values_map.keys()))
                value = random.choice(self.settings_values_map.get(setting))
                cluster.toggle_security_setting([cluster.get_master_node()],
                                                setting, value)

        if multiple_ca:
            for cluster in self.get_cluster_objects_for_input(multiple_ca):
                master = cluster.get_master_node()
                ntonencryptionBase().disable_nton_cluster([master])
                CbServer.x509 = x509main(host=master)
                for server in cluster.get_nodes():
                    CbServer.x509.delete_inbox_folder_on_server(server=server)
                CbServer.x509.generate_multiple_x509_certs(
                    servers=cluster.get_nodes())
                if all_node_upload:
                    for node_num in range(len(cluster.get_nodes())):
                        CbServer.x509.upload_root_certs(
                            server=cluster.get_nodes()[node_num],
                            root_ca_names=[
                                CbServer.x509.root_ca_names[node_num]
                            ])
                else:
                    for server in cluster.get_nodes():
                        CbServer.x509.upload_root_certs(server)
                CbServer.x509.upload_node_certs(servers=cluster.get_nodes())
                if use_client_certs:
                    CbServer.x509.upload_client_cert_settings(server=master)
                    client_cert_path, client_key_path = CbServer.x509.get_client_cert(
                        int_ca_name=int_ca_name)
                    # Copy the certs onto the test machines
                    for server in cluster.get_nodes():
                        shell = RemoteMachineShellConnection(server)
                        shell.execute_command(
                            f"mkdir -p {os.path.dirname(client_cert_path)}")
                        shell.copy_file_local_to_remote(
                            client_cert_path, client_cert_path)
                        shell.execute_command(
                            f"mkdir -p {CbServer.x509.CACERTFILEPATH}all")
                        shell.copy_file_local_to_remote(
                            f"{CbServer.x509.CACERTFILEPATH}all/all_ca.pem",
                            f"{CbServer.x509.CACERTFILEPATH}all/all_ca.pem")
                        shell.disconnect()
                    self._client_cert = self._read_from_file(client_cert_path)
                    self._client_key = self._read_from_file(client_key_path)
                    self.add_built_in_server_user(node=master)
                ntonencryptionBase().setup_nton_cluster(
                    [master], clusterEncryptionLevel="strict")
            if rotate_certs:
                for cluster in self.get_cluster_objects_for_input(
                        rotate_certs):
                    CbServer.x509.rotate_certs(cluster.get_nodes())
            if delete_certs:
                for cluster in self.get_cluster_objects_for_input(
                        delete_certs):
                    for node in cluster.get_nodes():
                        CbServer.x509.delete_trusted_CAs(node)
            if restart_pkey_nodes:
                for cluster in self.get_cluster_objects_for_input(
                        restart_pkey_nodes):
                    for node in cluster.get_nodes():
                        shell = RemoteMachineShellConnection(node)
                        shell.restart_couchbase()
                        shell.disconnect()
                        time.sleep(10)
                        cluster.failover_and_rebalance_nodes()
                        cluster.add_back_node("delta")

        if self.apply_settings_before_setup:
            if initial_xdcr:
                self.load_and_setup_xdcr()
            else:
                self.setup_xdcr_and_load()

        if pause:
            for cluster in self.get_cluster_objects_for_input(pause):
                for remote_cluster_refs in cluster.get_remote_clusters():
                    remote_cluster_refs.pause_all_replications()
                    time.sleep(60)

        if rebalance_in:
            for cluster in self.get_cluster_objects_for_input(rebalance_in):
                cluster.rebalance_in()

        if failover:
            for cluster in self.get_cluster_objects_for_input(failover):
                cluster.failover_and_rebalance_nodes(graceful=graceful,
                                                     rebalance=True)

        if rebalance_out:
            for cluster in self.get_cluster_objects_for_input(rebalance_out):
                cluster.rebalance_out()

        if swap_rebalance:
            for cluster in self.get_cluster_objects_for_input(swap_rebalance):
                cluster.swap_rebalance()

        if pause:
            for cluster in self.get_cluster_objects_for_input(pause):
                for remote_cluster_refs in cluster.get_remote_clusters():
                    remote_cluster_refs.resume_all_replications()

        if reboot:
            for cluster in self.get_cluster_objects_for_input(reboot):
                cluster.warmup_node()
            time.sleep(60)

        self.perform_update_delete()
        self.verify_results()
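
The random_setting branch above picks one security knob and one of its legal values from settings_values_map. That selection is plain standard-library code; a sketch isolated from the test class, reusing the same map the test defines:

import random

SETTINGS_VALUES_MAP = {
    "autofailover": ["enable", None],
    "n2n": ["enable", "disable"],
    "tls": ["all", "control", "strict"],
}


def random_security_setting(settings=SETTINGS_VALUES_MAP):
    """Return a (setting, value) pair, as the random_setting branch does."""
    setting = random.choice(list(settings.keys()))
    value = random.choice(settings[setting])
    return setting, value
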
Code example #57
class HostnameTests(BaseTestCase):

    def setUp(self):
        super(HostnameTests, self).setUp()
        self.builds = self.input.param("builds", "2.2.0")
        self.product = self.input.param('product', 'couchbase-server')
        self.is_negative_test = self.input.param('is_negative_test', False)
        self.error = self.input.param('error', '')
        self.name = self.input.param('name', '')

    def tearDown(self):
        super(HostnameTests, self).tearDown()
        for server in self.servers:
            self.assertTrue(RestHelper(RestConnection(server)).is_ns_server_running(timeout_in_seconds=480),
                            msg="ns server is not running even after waiting for 6 minutes")
        self.log.info("sleep for 10 seconds to give enough time for other nodes to restart")
        self.sleep(10)

    def install_builds(self, builds, servers=None):
        params = {}
        params['product'] = 'couchbase-server'
        builds_list = builds.split(";")
        st = 0
        end = 1
        if None in servers:
            servers_to_install = self.servers
        else:
            servers_to_install = servers
        for i in builds_list:
            params['version'] = i
            InstallerJob().sequential_install(servers_to_install[st:end], params)
            st = st + 1
            end = end + 1
        self.sleep(20)
        super(HostnameTests, self).setUp()

    def rest_api_addNode(self):
        hostnames = self.convert_to_hostname(self, self.servers[0:2])
        master_rest = RestConnection(self.master)
        master_rest.add_node(self.servers[1].rest_username, self.servers[1].rest_password, hostnames[1], self.servers[1].port)
        #Now check whether the node which we added is still referred via hostname or not.
        obj = RestConnection(self.servers[1])
        var = obj.get_nodes_self().hostname
        flag = True if self.servers[1].ip in var else False
        self.assertEqual(flag, False, msg="Fail - Name of node {0} got converted to IP. Failing the test!!!".format(self.servers[1].ip))
        self.log.info("Test Passed!!")
        self.sleep(10)

    # Cases 39 and 40 from the hostname management test plan, combined.
    def rest_api_renameNode(self):
        try:
            self.shell = RemoteMachineShellConnection(self.master)
            #com_inst_build = "cat /opt/couchbase/VERSION.txt"
            #out = self.shell.execute_command(com_inst_build.format(com_inst_build))
            self.install_builds(self.builds, self.servers[0:1])
            if self.is_negative_test:
                master_rest = RestConnection(self.master)
                self.log.info("Renaming node {0} to {1}".format(self.master, self.name))
                var = master_rest.rename_node(username=self.master.rest_username, password=self.master.rest_password,
                    port=self.master.port, hostname=self.name, is_negative_test=True)
                out = var.pop()
                self.assertEqual(out, self.error, msg="Failed to get the expected error. Expected {0}, but got: {1}".format(self.error, out))
                self.log.info("Got the expected error - {0}. Passing the test".format(out))
            else:
                self.log.info("Node {0} is referred via IP. Changing the name of the node".format(self.servers[0:1]))
                hostname = []
                info = self.shell.extract_remote_info()
                domain = ''.join(info.domain[0])
                hostname.append(info.hostname[0] + "." + domain)
                self.convert_to_hostname(self, self.servers[0:1])
                self.log.info("Calling get_node_self() to check the status of node {0}".format(self.servers[0:1]))
                obj = RestConnection(self.master)
                var = obj.get_nodes_self().hostname
                flag = True if self.master.ip in var else False
                self.assertEqual(flag, False, msg="Fail - Node {0} is still referred via IP. Should\
                                     have been referred via hostname. Failing the test!".format(self.master.ip))
                self.log.info("Name of node {0} got converted to hostname. Proceeding......!".format(self.master.ip))
                self.sleep(10)
                self.log.info("Now changing name of node {0} from hostname to IP".format(self.master.ip))
                var = obj.rename_node(username='******', password='******', port='', hostname=self.master.ip)
                self.log.info("Calling get_node_self() to check the status of the node {0}".format(self.master.ip))
                var = obj.get_nodes_self().hostname
                flag = True if self.master.ip in var else False
                self.assertEqual(flag, True, msg="Fail - Node {0} is still referred via hostname. Should have been referred via IP. Failing the test!".format(self.master.ip))
                self.log.info("Node {0} referred via IP. Pass !".format(self.master.ip))
        finally:
            self.shell.disconnect()


    # Note: declared as a staticmethod but still written to take `self` explicitly,
    # so callers pass the test instance as the first argument
    # (see rest_api_addNode and rest_api_renameNode above).
    @staticmethod
    def convert_to_hostname(self, servers_with_hostnames, username='******', password='******'):
        hostname = []
        for server in servers_with_hostnames:
            shell = RemoteMachineShellConnection(server)
            try:
                info = shell.extract_remote_info()
                domain = ''.join(info.domain[0])
                if not domain:
                    output = shell.execute_command_raw('nslookup %s' % info.hostname[0])
                    print output
                    self.fail("Domain is not defined, couchbase cannot be configured correctly. NOT A BUG. CONFIGURATION ISSUE")
                hostname.append(info.hostname[0] + "." + domain)
                master_rest = RestConnection(server)
                current_hostname = master_rest.get_nodes_self().hostname
                self.log.info("get_nodes_self() returned : {0}".format(current_hostname))
                if server.ip in current_hostname:
                    self.log.info("Node {0} is referred via IP and needs to be referred via hostname. Renaming the node".format(server.ip))
                    version = RestConnection(server).get_nodes_self().version
                    if version.startswith("1.8.1") or version.startswith("2.0.0") or version.startswith("2.0.1"):
                        # For these older builds the test rewrites the server settings
                        # directly instead of using the REST rename call.
                        RemoteUtilHelper.use_hostname_for_server_settings(server)
                        master_rest.init_cluster()
                    else:
                        master_rest.init_cluster()
                        master_rest.rename_node(username=username, password=password, port='', hostname=hostname[-1])
                else:
                    self.log.info("Node {0} is already referred via hostname. No need to rename".format(server.ip))
            finally:
                # Disconnect each shell as soon as this server is done,
                # instead of leaving every connection open until the end.
                shell.disconnect()
        return hostname
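The tests above repeat the same check of whether a node is still registered by IP: read get_nodes_self().hostname and look for the raw IP in it. A standalone sketch of that check, assuming only a RestConnection-like object whose get_nodes_self().hostname returns the name the cluster currently advertises (the node_referred_by_ip helper is illustrative and not part of the original class):

def node_referred_by_ip(rest, ip):
    # Name the cluster currently advertises for this node,
    # e.g. an "ip:port" string or a fully qualified hostname.
    current = rest.get_nodes_self().hostname
    # If the raw IP appears in the advertised name, the node is still
    # registered by IP rather than by its hostname.
    return ip in current

With such a helper, the assertions above reduce to self.assertFalse(node_referred_by_ip(RestConnection(self.master), self.master.ip)) after converting to a hostname, and the matching assertTrue after renaming the node back to its IP.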