Code Example #1
    def run(self):
        remote_client = RemoteMachineShellConnection(self.server)
        now = datetime.now()
        day = now.day
        month = now.month
        year = now.year
        hour = now.timetuple().tm_hour
        minute = now.timetuple().tm_min
        file_name = "%s-%s%s%s-%s%s-couch.tar.gz" % (self.server.ip,
                                                     month, day, year, hour,
                                                     minute)
        print("Collecting data files from %s\n" % self.server.ip)

        remote_client.extract_remote_info()
        data_path = self.__get_data_path(os_type=remote_client.info.type.lower())
        output, error = remote_client.execute_command("tar -zcvf {0} '{1}' >/dev/null 2>&1".
                                                      format(file_name, data_path))
        print("\n".join(output))
        print("\n".join(error))

        user_path = "/home/"
        if self.server.ssh_username == "root":
            user_path = "/"
        remote_path = "%s%s" % (user_path, self.server.ssh_username)
        status = remote_client.file_exists(remote_path, file_name)
        if not status:
            raise Exception("%s doesn't exists on server" % file_name)
        status = remote_client.get_file(remote_path, file_name,
                                        "%s/%s" % (self.path, file_name))
        if not status:
            raise Exception("Fail to download zipped logs from %s"
                            % self.server.ip)
        remote_client.execute_command("rm -f %s" % os.path.join(remote_path, file_name))
        remote_client.disconnect()
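
The tar-and-download flow above (archive the remote data directory, confirm the archive exists in the SSH user's home directory, pull it to the test client, then delete it) recurs in several of the examples below. A minimal, self-contained sketch of that flow is shown here for reference; the standalone function name and the dest_dir parameter are illustrative, and the server object is assumed to expose ip and ssh_username as in the snippet above.

import os

from lib.remote.remote_util import RemoteMachineShellConnection


def collect_remote_archive(server, remote_dir, file_name, dest_dir):
    """Hedged sketch: tar a remote directory, download the archive, clean up."""
    shell = RemoteMachineShellConnection(server)
    try:
        # Create the archive on the remote host (output discarded, as above)
        output, error = shell.execute_command(
            "tar -zcf {0} '{1}' >/dev/null 2>&1".format(file_name, remote_dir))
        shell.log_command_output(output, error)
        # The archive is assumed to land in the SSH user's home directory
        user_path = "/" if server.ssh_username == "root" else "/home/"
        remote_path = "%s%s" % (user_path, server.ssh_username)
        if not shell.file_exists(remote_path, file_name):
            raise Exception("%s not found on %s" % (file_name, server.ip))
        if not shell.get_file(remote_path, file_name,
                              os.path.join(dest_dir, file_name)):
            raise Exception("Failed to download %s from %s" % (file_name, server.ip))
        # Remove the archive from the remote host once it has been copied
        shell.execute_command("rm -f %s" % os.path.join(remote_path, file_name))
    finally:
        shell.disconnect()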
Code Example #2
    def run(self):
        remote_client = RemoteMachineShellConnection(self.server)
        now = datetime.now()
        day = now.day
        month = now.month
        year = now.year
        hour = now.timetuple().tm_hour
        minute = now.timetuple().tm_min
        file_name = "%s-%s%s%s-%s%s-diag.zip" % (self.server.ip, month, day, year, hour, minute)
        print "Collecting logs from %s\n" % self.server.ip
        output, error = remote_client.execute_cbcollect_info(file_name)
        print "\n".join(output)
        print "\n".join(error)

        user_path = "/home/"
        if self.server.ssh_username == "root":
            user_path = "/"
        remote_path = "%s%s" % (user_path, self.server.ssh_username)
        status = remote_client.file_exists(remote_path, file_name)
        if not status:
            raise Exception("%s doesn't exists on server" % file_name)
        status = remote_client.get_file(remote_path, file_name, "%s/%s" % (self.path, file_name))
        if status:
            print "Downloading zipped logs from %s" % self.server.ip
        else:
            raise Exception("Fail to download zipped logs from %s" % self.server.ip)
        remote_client.execute_command("rm -f %s" % os.path.join(remote_path, file_name))
        remote_client.disconnect()
Code Example #3
    def create_required_buckets(self):
        self.log.info("Get the available memory quota")
        bucket_util = bucket_utils(self.master)
        self.info = bucket_util.rest.get_nodes_self()
        threshold_memory = 1024
        total_memory_in_mb = self.info.memoryFree / 1024 ** 2
        total_available_memory_in_mb = total_memory_in_mb
        active_service = self.info.services

        if "index" in active_service:
            total_available_memory_in_mb -= self.info.indexMemoryQuota
        if "fts" in active_service:
            total_available_memory_in_mb -= self.info.ftsMemoryQuota
        if "cbas" in active_service:
            total_available_memory_in_mb -= self.info.cbasMemoryQuota
        if "eventing" in active_service:
            total_available_memory_in_mb -= self.info.eventingMemoryQuota

        print(total_memory_in_mb)
        available_memory = total_available_memory_in_mb - threshold_memory
        self.rest.set_service_memoryQuota(service='memoryQuota', memoryQuota=available_memory)
        self.rest.set_service_memoryQuota(service='cbasMemoryQuota', memoryQuota=available_memory-1024)
        self.rest.set_service_memoryQuota(service='indexMemoryQuota', memoryQuota=available_memory-1024)

        self.log.info("Create CB buckets")

        self.create_bucket(self.master, "GleambookUsers", bucket_ram=available_memory)
        shell = RemoteMachineShellConnection(self.master)
        command = 'curl -i -u Administrator:password --data \'ns_bucket:update_bucket_props("GleambookUsers", [{extra_config_string, "cursor_dropping_upper_mark=70;cursor_dropping_lower_mark=50"}]).\' http://172.23.104.16:8091/diag/eval'
        shell.execute_command(command)

        result = RestConnection(self.query_node).query_tool("CREATE PRIMARY INDEX idx_GleambookUsers ON GleambookUsers;")
        self.sleep(10, "wait for index creation.")
        self.assertTrue(result['status'] == "success")
Code Example #4
 def remove_backup_repo(self):
     remote_client = RemoteMachineShellConnection(self.backup_node)
     output, error = remote_client.execute_command("ls " + self.backup_path)
     if not error:
         command = "rm -rf {0}".format(self.backup_path)
         output, error = remote_client.execute_command(command)
     remote_client.log_command_output(output, error)
Code Example #5
File: collect_server_info.py Project: bharath-gp/TAF
    def run(self):
        file_name = "%s-%s-diag.zip" % (self.server.ip, time_stamp())
        if not self.local:
            from lib.remote.remote_util import RemoteMachineShellConnection
            remote_client = RemoteMachineShellConnection(self.server)
            print "Collecting logs from %s\n" % self.server.ip
            output, error = remote_client.execute_cbcollect_info(file_name)
            print "\n".join(error)

            user_path = "/home/"
            if remote_client.info.distribution_type.lower() == 'mac':
                user_path = "/Users/"
            else:
                if self.server.ssh_username == "root":
                    user_path = "/"

            remote_path = "%s%s" % (user_path, self.server.ssh_username)
            status = remote_client.file_exists(remote_path, file_name)
            if not status:
                raise Exception("%s doesn't exists on server" % file_name)
            status = remote_client.get_file(remote_path, file_name,
                                            "%s/%s" % (self.path, file_name))
            if status:
                print "Downloading zipped logs from %s" % self.server.ip
            else:
                raise Exception("Fail to download zipped logs from %s" %
                                self.server.ip)
            remote_client.execute_command("rm -f %s" %
                                          os.path.join(remote_path, file_name))
            remote_client.disconnect()
Code Example #6
 def convert_to_pkcs8(self, node):
     """
     converts a pkcs#1 pkey to pkcs#8 encrypted pkey
     directly by executing openssl cmds on VM.
     """
     shell = RemoteMachineShellConnection(node)
     passw = ''.join(
         random.choice(string.ascii_uppercase + string.digits)
         for _ in range(20))
     convert_cmd = self.openssl_path + " pkcs8 -in " + self.inbox_folder_path + "pkey.key" + \
                   " -passout pass:" + passw + " -topk8 -v2 aes256 -out " + \
                   self.inbox_folder_path + "enckey.key"
     output, error = shell.execute_command(convert_cmd)
     self.log.info('Output message is {0} and error message is {1}'.format(
         output, error))
     self.plain_passw_map[node.ip] = passw
     remove_cmd = "rm -rf " + self.inbox_folder_path + "pkey.key"
     output, error = shell.execute_command(remove_cmd)
     self.log.info('Output message is {0} and error message is {1}'.format(
         output, error))
     mv_command = "mv " + self.inbox_folder_path + "enckey.key " + \
                  self.inbox_folder_path + "pkey.key"
     output, error = shell.execute_command(mv_command)
     self.log.info('Output message is {0} and error message is {1}'.format(
         output, error))
     permissions_cmd = "chmod +777 " + self.inbox_folder_path + "pkey.key"
     output, error = shell.execute_command(permissions_cmd)
     self.log.info('Output message is {0} and error message is {1}'.format(
         output, error))
     shell.disconnect()
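
Every step in the snippet above repeats the same execute-then-log pair. A small hedged helper capturing that pattern is sketched below; the helper name is an illustration and not part of the original code.

def run_and_log(shell, cmd, log):
    """Hedged sketch: run a remote command and log its output and error,
    mirroring the repeated pattern in convert_to_pkcs8 above."""
    output, error = shell.execute_command(cmd)
    log.info('Output message is {0} and error message is {1}'.format(output, error))
    return output, error

# Usage, following the snippet above:
# output, error = run_and_log(shell, remove_cmd, self.log)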
Code Example #7
File: csvdatatest.py Project: arod1987/testrunner
 def create_and_restore_csv(self):
     try:
         self.__load_data()
         shell_obj = RemoteMachineShellConnection(self.master)
         self.log.info("Removing backup folder if already present")
         info = shell_obj.extract_remote_info()
         path = "/tmp/backup/"
         if info.type.lower() == "windows":
             path = "/cygdrive/c" + path
         #TODO : Check for mac also
         shell_obj.delete_files(path)
         create_dir = "mkdir " + path
         data_type = "csv:"
         destination = path + "data.csv"
         shell_obj.execute_command(create_dir)
         source = "http://*****:*****@ %s" % destination)
         source, destination = destination, source
         options = "-B standard_bucket0" + self.username_arg + self.password_arg
         self.log.info("Restoring data....!")
         shell_obj.execute_cbtransfer(source, destination, options)
         self.sleep(10)
         self.log.info("Checking whether number of items loaded match with the number of items restored.")
         rest = RestConnection(self.master)
         itemCount = rest.get_bucket_json('standard_bucket0')['basicStats']['itemCount']
         self.assertEqual(itemCount, self.num_items, msg="Number of items loaded do no match\
         with the number of items restored. Number of items loaded is {0} \
         but number of items restored is {1}".format(self.num_items, itemCount))
         self.log.info("Number of items loaded = Number of items restored. Pass!!")
     finally:
         shell_obj.disconnect()
Code Example #8
    def run(self):
        file_name = "%s-%s-diag.zip" % (self.server.ip, time_stamp())
        if not self.local:
            from lib.remote.remote_util import RemoteMachineShellConnection

            remote_client = RemoteMachineShellConnection(self.server)
            print "Collecting logs from %s\n" % self.server.ip
            output, error = remote_client.execute_cbcollect_info(file_name)
            print "\n".join(output)
            print "\n".join(error)

            user_path = "/home/"
            if remote_client.info.distribution_type.lower() == "mac":
                user_path = "/Users/"
            else:
                if self.server.ssh_username == "root":
                    user_path = "/"

            remote_path = "%s%s" % (user_path, self.server.ssh_username)
            status = remote_client.file_exists(remote_path, file_name)
            if not status:
                raise Exception("%s doesn't exists on server" % file_name)
            status = remote_client.get_file(remote_path, file_name, "%s/%s" % (self.path, file_name))
            if status:
                print "Downloading zipped logs from %s" % self.server.ip
            else:
                raise Exception("Fail to download zipped logs from %s" % self.server.ip)
            remote_client.execute_command("rm -f %s" % os.path.join(remote_path, file_name))
            remote_client.disconnect()
Code Example #9
    def run(self):
        file_name = "%s-%s-couch-dbinfo.txt" % (self.server.ip.replace('[', '').replace(']', '').replace(':', '.'),
                                                time_stamp())
        if not self.local:
            from lib.remote.remote_util import RemoteMachineShellConnection
            remote_client = RemoteMachineShellConnection(self.server)
            print("Collecting dbinfo from %s\n" % self.server.ip)
            output, error = remote_client.execute_couch_dbinfo(file_name)
            print("\n".join(output))
            print("\n".join(error))

            user_path = "/home/"
            if remote_client.info.distribution_type.lower() == 'mac':
                user_path = "/Users/"
            else:
                if self.server.ssh_username == "root":
                    user_path = "/"

            remote_path = "%s%s" % (user_path, self.server.ssh_username)
            status = remote_client.file_exists(remote_path, file_name)
            if not status:
                raise Exception("%s doesn't exists on server" % file_name)
            status = remote_client.get_file(remote_path, file_name,
                                        "%s/%s" % (self.path, file_name))
            if status:
                print("Downloading dbinfo logs from %s" % self.server.ip)
            else:
                raise Exception("Fail to download db logs from %s"
                                                     % self.server.ip)
            remote_client.execute_command("rm -f %s" % os.path.join(remote_path, file_name))
            remote_client.disconnect()
Code Example #10
    def run(self):
        remote_client = RemoteMachineShellConnection(self.server)
        now = datetime.now()
        day = now.day
        month = now.month
        year = now.year
        hour = now.timetuple().tm_hour
        minute = now.timetuple().tm_min
        file_name = "%s-%s%s%s-%s%s-diag.zip" % (self.server.ip, month, day,
                                                 year, hour, minute)
        print "Collecting logs from %s\n" % self.server.ip
        output, error = remote_client.execute_cbcollect_info(file_name)
        print "\n".join(output)
        print "\n".join(error)

        user_path = "/home/"
        if self.server.ssh_username == "root":
            user_path = "/"
        remote_path = "%s%s" % (user_path, self.server.ssh_username)
        status = remote_client.file_exists(remote_path, file_name)
        if not status:
            raise Exception("%s doesn't exists on server" % file_name)
        status = remote_client.get_file(remote_path, file_name,
                                        "%s/%s" % (self.path, file_name))
        if status:
            print "Downloading zipped logs from %s" % self.server.ip
        else:
            raise Exception("Fail to download zipped logs from %s" %
                            self.server.ip)
        remote_client.execute_command("rm -f %s" %
                                      os.path.join(remote_path, file_name))
        remote_client.disconnect()
Code Example #11
    def run(self):
        remote_client = RemoteMachineShellConnection(self.server)
        now = datetime.now()
        day = now.day
        month = now.month
        year = now.year
        hour = now.timetuple().tm_hour
        minute = now.timetuple().tm_min
        file_name = "%s-%s%s%s-%s%s-couch.tar.gz" % (self.server.ip, month, day, year, hour, minute)
        print "Collecting data files from %s\n" % self.server.ip

        remote_client.extract_remote_info()
        data_path = self.__get_data_path(os_type=remote_client.info.type.lower())
        output, error = remote_client.execute_command(
            "tar -zcvf {0} '{1}' >/dev/null 2>&1".format(file_name, data_path)
        )
        print "\n".join(output)
        print "\n".join(error)

        user_path = "/home/"
        if self.server.ssh_username == "root":
            user_path = "/"
        remote_path = "%s%s" % (user_path, self.server.ssh_username)
        status = remote_client.file_exists(remote_path, file_name)
        if not status:
            raise Exception("%s doesn't exists on server" % file_name)
        status = remote_client.get_file(remote_path, file_name, "%s/%s" % (self.path, file_name))
        if not status:
            raise Exception("Fail to download zipped logs from %s" % self.server.ip)
        remote_client.execute_command("rm -f %s" % os.path.join(remote_path, file_name))
        remote_client.disconnect()
Code Example #12
 def load(self, generators_load):
     gens_load = []
     for generator_load in generators_load:
         gens_load.append(copy.deepcopy(generator_load))
     items = 0
     for gen_load in gens_load:
         items += (gen_load.end - gen_load.start)
     shell = RemoteMachineShellConnection(self.server)
     try:
         self.log.info("Delete directory's content %s/data/default/%s ..." %
                       (self.directory, self.bucket_name))
         shell.execute_command('rm -rf %s/data/default/*' % self.directory)
         self.log.info("Create directory %s/data/default/%s..." %
                       (self.directory, self.bucket_name))
         shell.execute_command('mkdir -p %s/data/default/%s' %
                               (self.directory, self.bucket_name))
         self.log.info("Load %s documents to %s/data/default/%s..." %
                       (items, self.directory, self.bucket_name))
         for gen_load in gens_load:
             for i in range(gen_load.end):
                 key, value = next(gen_load)
                 out = shell.execute_command(
                     "echo '%s' > %s/data/default/%s/%s.json" %
                     (value, self.directory, self.bucket_name, key))
         self.log.info("LOAD IS FINISHED")
     finally:
         shell.disconnect()
Code Example #13
File: multiple_CA.py Project: couchbase/testrunner
 def load_sample_bucket(self, server, bucket_name="travel-sample"):
     shell = RemoteMachineShellConnection(server)
     shell.execute_command("""curl -v -u Administrator:password \
                          -X POST http://localhost:8091/sampleBuckets/install \
                       -d '["{0}"]'""".format(bucket_name))
     shell.disconnect()
     self.sleep(60)
Code Example #14
File: stats.py Project: mschoch/testrunner
 def start_atop(self):
     """Start atop collector"""
     for node in self.nodes:
         shell = RemoteMachineShellConnection(node)
         cmd = "killall atop; rm -fr /tmp/*.atop;" + \
             "atop -w /tmp/{0}.atop -a 15".format(node.ip) + \
             " > /dev/null 2> /dev.null < /dev/null &"
         shell.execute_command(cmd)
Code Example #15
 def load_sample_buckets(self, server, bucketName):
     from lib.remote.remote_util import RemoteMachineShellConnection
     shell = RemoteMachineShellConnection(server)
     shell.execute_command("""curl -v -u Administrator:password \
                          -X POST http://{0}:8091/sampleBuckets/install \
                       -d '["{1}"]'""".format(server.ip, bucketName))
     shell.disconnect()
     self.sleep(20)
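
The snippet above shells into the node only to run curl against the REST API. When port 8091 is reachable from the test client, the same sample-bucket install can be issued directly over HTTP; below is a hedged sketch using the requests library, which the original snippets do not use.

import json

import requests  # assumption: not part of the original snippets


def install_sample_bucket(host, bucket_name="travel-sample",
                          user="Administrator", password="password"):
    """Hedged sketch: POST to /sampleBuckets/install without going through SSH."""
    url = "http://{0}:8091/sampleBuckets/install".format(host)
    response = requests.post(url, auth=(user, password),
                             data=json.dumps([bucket_name]))
    response.raise_for_status()
    return response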
Code Example #16
File: stats.py Project: xiejunyi/testrunner
 def stop_atop(self):
     """Stop atop collector"""
     for node in self.nodes:
         try:
             shell = RemoteMachineShellConnection(node)
         except SystemExit:
             log.error("can't establish SSH session with {0}".format(node.ip))
         else:
             shell.execute_command("killall atop")
Code Example #17
File: stats.py Project: jdmuntacb/testrunner
 def stop_atop(self):
     """Stop atop collector"""
     for node in self.nodes:
         try:
             shell = RemoteMachineShellConnection(node)
         except SystemExit:
             log.error("can't establish SSH session with {0}".format(
                 node.ip))
         else:
             shell.execute_command("killall atop")
Code Example #18
File: tuq_tokens.py Project: prasanna135/testrunner
 def tearDown(self):
     server = self.master
     shell = RemoteMachineShellConnection(server)
     shell.execute_command(
         """curl -X DELETE -u Administrator:password http://{0}:8091/pools/default/buckets/beer-sample""".format(
             server.ip
         )
     )
     self.sleep(20)
     super(TokenTests, self).tearDown()
Code Example #19
 def test_audit_event_for_authentication_failure_and_authorization_failure(
         self):
     # create a test user and grant it the views_admin role
     user = [{'id': 'test', 'password': '******', 'name': 'test'}]
     RbacBase().create_user_source(user, 'builtin', self.master)
     user_role_list = [{
         'id': 'test',
         'name': 'test',
         'roles': 'views_admin[*]'
     }]
     RbacBase().add_user_role(user_role_list, self.rest, 'builtin')
     self.load_data_to_collection(self.docs_per_day * self.num_docs,
                                  "src_bucket._default._default")
     body = self.create_save_function_body(
         self.function_name, HANDLER_CODE.BUCKET_OPS_ON_UPDATE)
     self.deploy_function(body)
     self.verify_doc_count_collections("dst_bucket._default._default",
                                       self.docs_per_day * self.num_docs)
     eventing_node = self.get_nodes_from_services_map(
         service_type="eventing", get_all_nodes=False)
     shell = RemoteMachineShellConnection(eventing_node)
     # audit event for authentication failure
     shell.execute_command(
         "curl -s -XGET http://Administrator:wrongpassword@localhost:8096/api/v1/stats"
     )
     expected_results_authentication_failure = {
         "real_userid:source": "internal",
         "real_userid:user": "******",
         "context": "<nil>",
         "id": 32787,
         "name": "Authentication Failure",
         "description": "Authentication failed",
         "method": "GET",
         "url": "/api/v1/stats"
     }
     self.check_config(32787, eventing_node,
                       expected_results_authentication_failure)
     # audit event for authorisation failure
     shell.execute_command(
         "curl -s -XGET http://test:password@localhost:8096/api/v1/config")
     expected_results_authorization_failure = {
         "real_userid:source": "local",
         "real_userid:user": "******",
         "context": "<nil>",
         "id": 32788,
         "name": "Authorization Failure",
         "description": "Authorization failed",
         "method": "GET",
         "url": "/api/v1/config"
     }
     self.check_config(32788, eventing_node,
                       expected_results_authorization_failure)
     shell.disconnect()
     self.undeploy_and_delete_function(body)
Code Example #20
File: stats.py Project: xiejunyi/testrunner
 def start_atop(self):
     """Start atop collector"""
     for node in self.nodes:
         try:
             shell = RemoteMachineShellConnection(node)
         except SystemExit:
             log.error("can't establish SSH session with {0}".format(node.ip))
         else:
             cmd = "killall atop; rm -fr /tmp/*.atop;" + \
                 "atop -w /tmp/{0}.atop -a 15".format(node.ip) + \
                 " > /dev/null 2> /dev.null < /dev/null &"
             shell.execute_command(cmd)
Code Example #21
 def drop_data_to_bucket_from_eventing(self,server):
     shell = RemoteMachineShellConnection(server)
     shell.info = shell.extract_remote_info()
     if shell.info.type.lower() == "windows":
         raise Exception("Should not run on windows")
     o, r = shell.execute_command("/sbin/iptables -A OUTPUT -p tcp --dport 11210 -j DROP")
     shell.log_command_output(o, r)
     # o, r = shell.execute_command("/sbin/iptables -A INPUT -p tcp --dport 11210 -j DROP")
     # shell.log_command_output(o, r)
     log.info("enabled firewall on {0}".format(server))
     o, r = shell.execute_command("/sbin/iptables --list")
     shell.log_command_output(o, r)
     shell.disconnect()
Code Example #22
File: stats.py Project: jdmuntacb/testrunner
 def start_atop(self):
     """Start atop collector"""
     for node in self.nodes:
         try:
             shell = RemoteMachineShellConnection(node)
         except SystemExit:
             log.error("can't establish SSH session with {0}".format(
                 node.ip))
         else:
             cmd = "killall atop; rm -fr /tmp/*.atop;" + \
                 "atop -w /tmp/{0}.atop -a 15".format(node.ip) + \
                 " > /dev/null 2> /dev.null < /dev/null &"
             shell.execute_command(cmd)
Code Example #23
File: tuq_tokens.py Project: bharath-gp/testrunner
    def load_sample_buckets(self, bucketName="beer-sample" ):
        """
        Load the specified sample bucket in Couchbase
        """
        #self.cluster.bucket_delete(server=self.master, bucket="default")
        server = self.master
        shell = RemoteMachineShellConnection(server)
        shell.execute_command("""curl -v -u Administrator:password \
                             -X POST http://{0}:8091/sampleBuckets/install \
                          -d '["{1}"]'""".format(server.ip, bucketName))
        self.sleep(30)

        shell.disconnect()
Code Example #24
 def check_eventing_logs_for_panic(self):
     if self.input.param("skip_host_login", False):
         log.warning(
             "-->Skipping check_eventing_logs_for_panic due to skip_host_login!"
         )
         return
     self.generate_map_nodes_out_dist()
     panic_str = "panic"
     eventing_nodes = self.get_nodes_from_services_map(
         service_type="eventing", get_all_nodes=True)
     if not eventing_nodes:
         return None
     for eventing_node in eventing_nodes:
         shell = RemoteMachineShellConnection(eventing_node)
         _, dir_name = RestConnection(eventing_node).diag_eval(
             'filename:absname(element(2, application:get_env(ns_server,error_logger_mf_dir))).'
         )
         eventing_log = str(dir_name) + '/eventing.log*'
         count, err = shell.execute_command(
             "zgrep \"{0}\" {1} | wc -l".format(panic_str, eventing_log))
         if isinstance(count, list):
             count = int(count[0])
         else:
             count = int(count)
         if count > self.panic_count:
             log.info(
                 "===== PANIC OBSERVED IN EVENTING LOGS ON SERVER {0}=====".
                 format(eventing_node.ip))
             panic_trace, _ = shell.execute_command(
                 "zgrep \"{0}\" {1}".format(panic_str, eventing_log))
             log.info("\n {0}".format(panic_trace))
             self.panic_count = count
         os_info = shell.extract_remote_info()
         if os_info.type.lower() == "windows":
             # This is a fixed path in all windows systems inside couchbase
             dir_name_crash = 'c://CrashDumps'
         else:
             dir_name_crash = str(dir_name) + '/../crash/'
         core_dump_count, err = shell.execute_command(
             "ls {0}| wc -l".format(dir_name_crash))
         if isinstance(core_dump_count, list):
             core_dump_count = int(core_dump_count[0])
         else:
             core_dump_count = int(core_dump_count)
         if core_dump_count > 0:
             log.info(
                 "===== CORE DUMPS SEEN ON EVENTING NODES, SERVER {0} : {1} crashes seen ====="
                 .format(eventing_node.ip, core_dump_count))
         shell.disconnect()
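
The panic and core-dump checks above each run a counting command and then normalize the result, since execute_command may return the output as a list of lines. A hedged helper for that repeated normalization is sketched below; the helper name is illustrative.

def remote_count(shell, command):
    """Hedged sketch: run a command whose stdout is a single integer
    (for example `zgrep ... | wc -l`) and normalize the output, which
    execute_command may return as a list of lines or as a string."""
    output, _ = shell.execute_command(command)
    if isinstance(output, list):
        return int(output[0])
    return int(output)

# Usage, following the snippet above:
# count = remote_count(shell, 'zgrep "panic" %s | wc -l' % eventing_log)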
Code Example #25
File: testrunner.py Project: umang-cb/Jython
def perform_cb_collect(_input, log_path=None):
    import logger
    log = logger.Logger.get_logger()
    for node in _input.servers:
        params = dict()
        if len(_input.servers) != 1:
            params['nodes'] = 'ns_1@' + node.ip
        else:
            # In case of single node we have to pass ip as below
            params['nodes'] = 'ns_1@' + '127.0.0.1'

        log.info('Collecting log on node ' + node.ip)
        rest = RestConnection(node)
        status, _, _ = rest.perform_cb_collect(params)
        time.sleep(
            10
        )  # This is needed as it takes a few seconds before the collection start
        log.info('CB collect status on %s is %s' % (node.ip, status))

        log.info('Polling active task endpoint to check CB collect status')
        if status is True:
            cb_collect_response = {}
            while True:
                content = rest.active_tasks()
                for response in content:
                    if response['type'] == 'clusterLogsCollection':
                        cb_collect_response = response
                        break
                if cb_collect_response['status'] == 'completed':
                    log.info(cb_collect_response)
                    break
                else:
                    time.sleep(
                        10
                    )  # CB collect in progress, wait for 10 seconds and check progress again

            log.info('Copy CB collect ZIP file to Client')
            remote_client = RemoteMachineShellConnection(node)
            cb_collect_path = cb_collect_response['perNode'][
                params['nodes']]['path']
            zip_file_copied = remote_client.get_file(
                os.path.dirname(cb_collect_path),
                os.path.basename(cb_collect_path), log_path)
            log.info('%s node cb collect zip coped on client : %s' %
                     (node.ip, zip_file_copied))

            if zip_file_copied:
                remote_client.execute_command("rm -f %s" % cb_collect_path)
                remote_client.disconnect()
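
The polling loop in the snippet above waits on the clusterLogsCollection task indefinitely. A hedged variant with an explicit timeout is sketched below; the function name, timeout value, and poll interval are illustrative, and rest is assumed to be the same RestConnection object used above.

import time


def wait_for_cb_collect(rest, timeout_secs=1800, poll_interval=10):
    """Hedged sketch: poll active tasks until log collection completes or times out."""
    deadline = time.time() + timeout_secs
    while time.time() < deadline:
        for task in rest.active_tasks():
            if task.get('type') == 'clusterLogsCollection' \
                    and task.get('status') == 'completed':
                return task
        time.sleep(poll_interval)  # collection still in progress
    raise Exception("cbcollect did not complete within %s seconds" % timeout_secs)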
Code Example #26
class MemcachetestRunner():
    def __init__(self, server, path="/tmp/", memcached_ip="localhost", memcached_port="11211", num_items=100000, extra_params=""):
        self.server = server
        self.shell = RemoteMachineShellConnection(self.server)
        self.path = path
        self.memcached_ip = memcached_ip
        self.memcached_port = memcached_port
        self.num_items = num_items
        self.extra_params = extra_params
        self.log = logger.Logger.get_logger()

    def start_memcachetest(self):
        #check that memcachetest already installed
        exists = self.shell.file_exists('/usr/local/bin/', 'memcachetest')
        if not exists:
            #try to get from git and install
            output, error = self.shell.execute_command_raw("cd {0}; git clone git://github.com/membase/memcachetest.git".format(self.path))
            self.shell.log_command_output(output, error)
            output, error = self.shell.execute_command_raw("cd {0}/memcachetest; ./config/autorun.sh && ./configure && make install".format(self.path))
            self.shell.log_command_output(output, error)
        else:
            self.log.info("memcachetest already set on {0}:/usr/local/bin/memcachetest".format(self.server.ip, self.path))
        self.stop_memcachetest()
        return self.launch_memcachetest()

    def launch_memcachetest(self):
        command = "{0}/memcachetest/memcachetest -h {1}:{2} -i {3} {4}".format(self.path, self.memcached_ip, self.memcached_port, self.num_items, self.extra_params)
        output, error = self.shell.execute_command_raw(command)
        status = self.shell.log_command_output(output, error, track_words="downstream timeout")

    def stop_memcachetest(self):
        cmd = "killall memcachetest"
        output, error = self.shell.execute_command(cmd)
        self.shell.log_command_output(output, error)
        self.log.info("memcachetest was stopped on {0}".format(self.server.ip))
Code Example #27
File: testssl.py Project: couchbase/testrunner
 def test_tls_min_version(self):
     """
     Verifies the TLS minimum version of the cluster with the check_version
     """
     tls_versions = ["1.3  ", "1.2  ", "1.1  ", "1  "]
     for check_version in tls_versions:
         self.log.info("Verifying for minimum version = {0}".format(check_version))
         rest = RestConnection(self.master)
         rest.set_min_tls_version(version="tlsv" + check_version.strip())
         for node in self.servers:
             self.log.info("Testing node {0}".format(node.ip))
             ports_to_scan = self.get_service_ports(node)
             ports_to_scan.extend(self.ports_to_scan)
             for node_port in ports_to_scan:
                 self.log.info("Port being tested: {0}".format(node_port))
                 cmd = self.testssl.TEST_SSL_FILENAME + " -p --warnings off --color 0 {0}:{1}" \
                     .format(node.ip, node_port)
                 self.log.info("The command is {0}".format(cmd))
                 shell = RemoteMachineShellConnection(self.slave_host)
                 output, error = shell.execute_command(cmd)
                 shell.disconnect()
                 output = output.decode().split("\n")
                 output1 = ''.join(output)
                 self.assertFalse("error" in output1.lower(), msg=output)
                 self.assertTrue("tls" in output1.lower(), msg=output)
                 for line in output:
                     for version in tls_versions:
                         if "TLS " + version in line and version >= str(check_version):
                             self.assertTrue("offered" in line,
                                             msg="TLS {0} is incorrect disabled".format(version))
                         elif "TLS " + version in line and version < str(check_version):
                             self.assertTrue("not offered" in line,
                                             msg="TLS {0} is incorrect enabled".format(version))
Code Example #28
File: testssl.py Project: couchbase/testrunner
    def test_tls_1_dot_3_ciphers(self):
        """
        Verifies Couchbase supports all TLS 1.3 ciphers when TLS minimum version set to 1.3
        """
        rest = RestConnection(self.master)
        rest.set_min_tls_version(version="tlsv1.3")
        for node in self.servers:
            self.log.info("Testing node {0}".format(node.ip))
            ports_to_scan = self.get_service_ports(node)
            ports_to_scan.extend(self.ports_to_scan)
            for node_port in ports_to_scan:
                self.log.info("Port being tested: {0}".format(node_port))
                cmd = self.testssl.TEST_SSL_FILENAME + " -e --warnings off --color 0 {0}:{1}" \
                    .format(node.ip, node_port)
                self.log.info("The command is {0}".format(cmd))
                shell = RemoteMachineShellConnection(self.slave_host)
                output, error = shell.execute_command(cmd)
                shell.disconnect()
                output = output.decode().split("\n")
                check_next = 0
                tls_1_dot_3_ciphers = ["TLS_AES_256_GCM_SHA384", "TLS_AES_128_GCM_SHA256",
                                       "TLS_CHACHA20_POLY1305_SHA256", "TLS_AES_128_CCM_SHA256",
                                       "TLS_AES_128_CCM_8_SHA256"]
                for line in output:
                    if check_next == 1:
                        if line == '':
                            check_next = 0
                        else:
                            if line.split()[-1] not in tls_1_dot_3_ciphers:
                                self.fail("Cipher used not under TLS 1.3 supported cipher suites")

                    elif "--------" in line:
                        check_next = 1
Code Example #29
 def stop_measure_sched_delay(self):
     for server in self.servers:
         shell = RemoteMachineShellConnection(server)
         cmd = "killall -9 -r .*measure-sched-delays"
         output, error = shell.execute_command(cmd)
         shell.log_command_output(output, error)
         shell.disconnect()
         self.log.info("measure-sched-delays was stopped on {0}".format(server.ip))
Code Example #30
 def stop_measure_sched_delay(self):
     for server in self.servers:
         shell = RemoteMachineShellConnection(server)
         cmd = "killall -9 -r .*measure-sched-delays"
         output, error = shell.execute_command(cmd)
         shell.log_command_output(output, error)
         shell.disconnect()
         self.log.info("measure-sched-delays was stopped on {0}".format(
             server.ip))
Code Example #31
 def remove_backup(self):
     remote_client = RemoteMachineShellConnection(self.backup_node)
     cmd = "cbbackupmgr remove --archive {0} --repo backup_{1}".format(
         self.backup_path, self.rand)
     command = "{0}{1}".format(self.cli_command_location, cmd)
     output, error = remote_client.execute_command(command)
     remote_client.log_command_output(output, error)
     if error:
         raise Exception("Backup not removed successfully")
     self.is_backup_exists = False
Code Example #32
File: csvdatatest.py Project: rayleyva/testrunner
 def create_and_restore_csv(self):
     try:
         self.__load_data()
         shell_obj = RemoteMachineShellConnection(self.master)
         self.log.info("Removing backup folder if already present")
         info = shell_obj.extract_remote_info()
         path = "/tmp/backup/"
         if info.type.lower() == "windows":
             path = "/cygdrive/c" + path
         #TODO : Check for mac also
         shell_obj.delete_files(path)
         create_dir = "mkdir " + path
         data_type = "csv:"
         destination = path + "data.csv"
         shell_obj.execute_command(create_dir)
         source = "http://*****:*****@ %s" % destination)
         source, destination = destination, source
         options = "-B standard_bucket0" + self.username_arg + self.password_arg
         self.log.info("Restoring data....!")
         shell_obj.execute_cbtransfer(source, destination, options)
         self.sleep(10)
         self.log.info(
             "Checking whether number of items loaded match with the number of items restored."
         )
         rest = RestConnection(self.master)
         itemCount = rest.get_bucket_json(
             'standard_bucket0')['basicStats']['itemCount']
         self.assertEqual(itemCount,
                          self.num_items,
                          msg="Number of items loaded do no match\
         with the number of items restored. Number of items loaded is {0} \
         but number of items restored is {1}".format(
                              self.num_items, itemCount))
         self.log.info(
             "Number of items loaded = Number of items restored. Pass!!")
     finally:
         shell_obj.disconnect()
Code Example #33
 def _restore_with_tool(self, mappings, namespaces, include, restore_args, use_https=False):
     if not self.is_backup_exists:
         return self.is_backup_exists, "Backup not found"
     remote_client = RemoteMachineShellConnection(self.backup_node)
     if use_https:
         command = "{0}cbbackupmgr restore --archive {1} --repo backup_{2} --cluster couchbases://{3}" \
                   " --username {4} --password {5} --force-updates {6} --no-ssl-verify".format(
             self.cli_command_location, self.backup_path, self.rand,
             self.restore_node.ip, self.restore_node.rest_username,
             self.restore_node.rest_password, self.disabled_services)
     else:
         command = "{0}cbbackupmgr restore --archive {1} --repo backup_{2} --cluster couchbase://{3}" \
                   " --username {4} --password {5} --force-updates {6}".format(
             self.cli_command_location, self.backup_path, self.rand,
             self.restore_node.ip, self.restore_node.rest_username,
             self.restore_node.rest_password, self.disabled_services)
     if not restore_args:
         mapping_args = ""
         if mappings:
             mapping_args += "--map-data "
             remappings = []
             if self.backup_bucket != self.restore_bucket:
                 bucket_mapping = "{0}={1}".format(
                     self.backup_bucket, self.restore_bucket)
                 remappings.append(bucket_mapping)
             for mapping in mappings:
                 mapping_tokens = mapping.split(":")
                 src = "{0}.{1}".format(
                     self.backup_bucket, mapping_tokens[0])
                 tgt = "{0}.{1}".format(
                     self.restore_bucket, mapping_tokens[1])
                 remappings.append("{0}={1}".format(src, tgt))
             mapping_args += ",".join(remappings)
         elif self.backup_bucket != self.restore_bucket:
             mapping_args += "--map-data "
             mapping_args += "{0}={1}".format(
                 self.backup_bucket, self.restore_bucket)
         config_args = "--include-data " if include else "--exclude-data "
         if namespaces:
             namespaces = ["{0}.{1}".format(self.backup_bucket, namespace)
                           for namespace in namespaces]
             config_args += ",".join(namespaces)
         else:
             config_args += self.backup_bucket
         restore_args = "{0} {1}".format(mapping_args, config_args)
     command = "{0} {1}".format(command, restore_args)
     output, error = remote_client.execute_command(command)
     remote_client.log_command_output(output, error)
     if error or not [x for x in output if 'Restore completed successfully'
                                           in x]:
         self.log.error(output)
         return False, output
     return True, output
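
The cbbackupmgr argument construction in the snippet above is easier to read in isolation. The sketch below pulls the --map-data and --include-data/--exclude-data string building out into a plain function; the function name and parameters are illustrative, and the "srcScope:tgtScope" mapping format follows the snippet above.

def build_restore_args(backup_bucket, restore_bucket, mappings, namespaces, include):
    """Hedged sketch of the --map-data / --include-data argument building above."""
    mapping_args = ""
    if mappings:
        remappings = []
        if backup_bucket != restore_bucket:
            remappings.append("{0}={1}".format(backup_bucket, restore_bucket))
        for mapping in mappings:
            src_scope, tgt_scope = mapping.split(":")
            remappings.append("{0}.{1}={2}.{3}".format(
                backup_bucket, src_scope, restore_bucket, tgt_scope))
        mapping_args = "--map-data " + ",".join(remappings)
    elif backup_bucket != restore_bucket:
        mapping_args = "--map-data {0}={1}".format(backup_bucket, restore_bucket)
    config_args = "--include-data " if include else "--exclude-data "
    if namespaces:
        config_args += ",".join("{0}.{1}".format(backup_bucket, ns)
                                for ns in namespaces)
    else:
        config_args += backup_bucket
    return "{0} {1}".format(mapping_args, config_args).strip()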
Code Example #34
File: doc_loader.py Project: EricACooper/testrunner
 def load(self, generators_load):
     gens_load = []
     for generator_load in generators_load:
         gens_load.append(copy.deepcopy(generator_load))
     items = 0
     for gen_load in gens_load:
         items += (gen_load.end - gen_load.start)
     shell = RemoteMachineShellConnection(self.server)
     try:
         self.log.info("Delete directory's content %s/data/default/%s ..." % (self.directory, self.bucket_name))
         shell.execute_command('rm -rf %s/data/default/*' % self.directory)
         self.log.info("Create directory %s/data/default/%s..." % (self.directory, self.bucket_name))
         shell.execute_command('mkdir -p %s/data/default/%s' % (self.directory, self.bucket_name))
         self.log.info("Load %s documents to %s/data/default/%s..." % (items, self.directory, self.bucket_name))
         for gen_load in gens_load:
             for i in xrange(gen_load.end):
                 key, value = gen_load.next()
                 out = shell.execute_command("echo '%s' > %s/data/default/%s/%s.json" % (value, self.directory,
                                                                                         self.bucket_name, key))
         self.log.info("LOAD IS FINISHED")
     finally:
         shell.disconnect()
Code Example #35
    def index_query_beer_sample(self):
        #delete default bucket
        self._cb_cluster.delete_bucket("default")
        master = self._cb_cluster.get_master_node()
        from lib.remote.remote_util import RemoteMachineShellConnection
        shell = RemoteMachineShellConnection(master)
        shell.execute_command("""curl -v -u Administrator:password \
                         -X POST http://{0}:8091/sampleBuckets/install \
                      -d '["beer-sample"]'""".format(master.ip))
        shell.disconnect()
        self.sleep(20)
        bucket = self._cb_cluster.get_bucket_by_name("beer-sample")
        index = self.create_index(bucket, "beer-index")
        self.wait_for_indexing_complete()
        self.validate_index_count(equal_bucket_doc_count=True,
                                  zero_rows_ok=False)

        query = {"match": "cafe", "field": "name"}
        hits, _, _, _ = index.execute_query(query,
                                         zero_results_ok=False,
                                         expected_hits=10)
        self.log.info("Hits: %s" % hits)
Code Example #36
File: eventing_base.py Project: membase/testrunner
 def check_eventing_logs_for_panic(self):
     self.generate_map_nodes_out_dist()
     panic_str = "panic"
     eventing_nodes = self.get_nodes_from_services_map(service_type="eventing", get_all_nodes=True)
     if not eventing_nodes:
         return None
     for eventing_node in eventing_nodes:
         shell = RemoteMachineShellConnection(eventing_node)
         _, dir_name = RestConnection(eventing_node).diag_eval(
             'filename:absname(element(2, application:get_env(ns_server,error_logger_mf_dir))).')
         eventing_log = str(dir_name) + '/eventing.log*'
         count, err = shell.execute_command("zgrep \"{0}\" {1} | wc -l".
                                            format(panic_str, eventing_log))
         if isinstance(count, list):
             count = int(count[0])
         else:
             count = int(count)
         if count > self.panic_count:
             log.info("===== PANIC OBSERVED IN EVENTING LOGS ON SERVER {0}=====".format(eventing_node.ip))
             panic_trace, _ = shell.execute_command("zgrep \"{0}\" {1}".
                                                    format(panic_str, eventing_log))
             log.info("\n {0}".format(panic_trace))
             self.panic_count = count
         os_info = shell.extract_remote_info()
         if os_info.type.lower() == "windows":
             # This is a fixed path in all windows systems inside couchbase
             dir_name_crash = 'c://CrashDumps'
         else:
             dir_name_crash = str(dir_name) + '/../crash/'
         core_dump_count, err = shell.execute_command("ls {0}| wc -l".format(dir_name_crash))
         if isinstance(core_dump_count, list):
             core_dump_count = int(core_dump_count[0])
         else:
             core_dump_count = int(core_dump_count)
         if core_dump_count > 0:
             log.info("===== CORE DUMPS SEEN ON EVENTING NODES, SERVER {0} : {1} crashes seen =====".format(
                      eventing_node.ip, core_dump_count))
         shell.disconnect()
Code Example #37
    def test_audit_event_for_authentication_failure_and_authorization_failure(self):

        self._load_doc_data_all_buckets()
        for bucket in self.buckets:
            self._execute_ddoc_ops("create", self.test_with_view, self.num_ddocs, self.num_views_per_ddoc,
                                   bucket=bucket)

        self._wait_for_stats_all_buckets([self.master])
        self._verify_ddoc_ops_all_buckets()
        self._verify_ddoc_data_all_buckets()

        shell = RemoteMachineShellConnection(self.master)

        # By default views audit logs are not enabled, Run below curl command to enable all view audit events
        shell.execute_command("curl -v  POST -u Administrator:password http://localhost:8091/settings/audit -d auditdEnabled=true -d disabled=8255,20485,20488,20489,20490,20491,20493,28672,28673,28674,28675,28676,28677,28678,28679,28680,28681,28682,28683,28684,28685,28686,28687,28688,28689,28690,28691,28692,28693,28694,28695,28697,28698,28699,28700,28701,28702,28704,28705,28706,28707,28708,28709,28710,28711,28712,28713,28714,28715,28716,28717,28718,28719,28720,28721,28722,28723,28724,28725,28726,28727,28728,32770,32771,32772,32780,32783,32784,32785,32786,36867,36868,36869,36870,36871,36872,36873,36877,36878,36879,36880,45057,45061,45066,45070")

        # audit event for authentication failure
        shell.execute_command("curl -s -XGET http://Administrator:wrongpassword@localhost:8092/default/_design/dev_ddoc0/_view/views0")
        expected_results_authentication_failure = {
                                                     "auth":"Administrator",
                                                     "error":"unauthorized",
                                                     "user_agent": "curl/7.29.0",
                                                     "id": 40966,
                                                     "name": "Access denied",
                                                     "description": "Access denied to the REST API due to invalid permissions or credentials",
                                                     "method": "GET",
                                                     "url": "/default/_design/dev_ddoc0/_view/views0"
                                                   }

        self.check_config(40966, self.master, expected_results_authentication_failure)

        # create a cluster admin user
        user = [{'id': 'test', 'password': '******', 'name': 'test'}]
        RbacBase().create_user_source(user, 'builtin', self.master)
        user_role_list = [{'id': 'test', 'name': 'test', 'roles': 'cluster_admin'}]
        RbacBase().add_user_role(user_role_list, self.rest, 'builtin')

        # audit event for authorisation failure
        shell.execute_command("curl -s -XGET  http://test:password@localhost:8092/default/_design/dev_ddoc0/_view/views0")
        expected_results_authorization_failure = {
                                                     "auth": "test",
                                                     "error": "forbidden",
                                                     "user_agent": "curl/7.29.0",
                                                     "id": 40966,
                                                     "name": "Access denied",
                                                     "description": "Access denied to the REST API due to invalid permissions or credentials",
                                                     "method": "GET",
                                                     "url": "/default/_design/dev_ddoc0/_view/views0"
                                                  }
        self.check_config(40966, self.master, expected_results_authorization_failure)
        shell.disconnect()
Code Example #38
 def _run(self, server):
     mem = None
     try:
         if not self.local:
             from lib.remote.remote_util import RemoteMachineShellConnection
             remote_client = RemoteMachineShellConnection(server)
             print("Collecting memory info from %s\n" % server.ip)
             remote_cmd = "sh -c 'if [[ \"$OSTYPE\" == \"darwin\"* ]]; then sysctl hw.memsize|grep -Eo [0-9]; " \
                                 "else grep MemTotal /proc/meminfo|grep -Eo [0-9]; fi'"
             output, error = remote_client.execute_command(remote_cmd)
             print("\n".join(error))
             remote_client.disconnect()
             mem = int("".join(output))
     except Exception as e:
         self.fail.append((server.ip, e))
     else:
         if mem:
             self.succ[server.ip] = mem
         else:
             self.fail.append((server.ip, Exception("mem parse failed")))
Code Example #39
    def test_xdcr_with_security(self):
        #Settings
        self.settings_values_map = {
            "autofailover": ["enable", None],
            "n2n": ["enable", "disable"],
            "tls": ["all", "control", "strict"]
        }

        self.apply_settings_before_setup = self._input.param(
            "apply_settings_before_setup", False)
        self.disable_autofailover = self._input.param("disable_autofailover",
                                                      False)
        self.enable_n2n = self._input.param("enable_n2n", False)
        self.enforce_tls = self._input.param("enforce_tls", None)
        self.tls_level = self._input.param("tls_level", "control")
        self.enable_autofailover = self._input.param("enable_autofailover",
                                                     False)
        self.disable_n2n = self._input.param("disable_n2n", None)
        self.disable_tls = self._input.param("disable_tls", None)

        rebalance_in = self._input.param("rebalance_in", None)
        rebalance_out = self._input.param("rebalance_out", None)
        swap_rebalance = self._input.param("swap_rebalance", None)
        failover = self._input.param("failover", None)
        graceful = self._input.param("graceful", None)
        pause = self._input.param("pause", None)
        reboot = self._input.param("reboot", None)
        initial_xdcr = self._input.param("initial_xdcr",
                                         random.choice([True, False]))
        random_setting = self._input.param("random_setting", False)
        multiple_ca = self._input.param("multiple_ca", None)
        use_client_certs = self._input.param("use_client_certs", None)
        int_ca_name = self._input.param("int_ca_name", "iclient1_clientroot")
        all_node_upload = self._input.param("all_node_upload", False)
        rotate_certs = self._input.param("rotate_certs", None)
        delete_certs = self._input.param("delete_certs", None)
        restart_pkey_nodes = self._input.param("restart_pkey_nodes", None)

        if not self.apply_settings_before_setup:
            if initial_xdcr:
                self.load_and_setup_xdcr()
            else:
                self.setup_xdcr_and_load()

        if self.enforce_tls:
            for cluster in self.get_cluster_objects_for_input(
                    self.enforce_tls):
                if self.tls_level == "rotate":
                    for level in self.settings_values_map["tls"]:
                        cluster.toggle_security_setting(
                            [cluster.get_master_node()], "tls", level)
                        time.sleep(5)
                else:
                    cluster.toggle_security_setting(
                        [cluster.get_master_node()], "tls", self.tls_level)

        #Revert to default (control) tls level
        if self.disable_tls:
            for cluster in self.get_cluster_objects_for_input(
                    self.disable_tls):
                cluster.toggle_security_setting([cluster.get_master_node()],
                                                "tls")

        if self.enable_n2n:
            for cluster in self.get_cluster_objects_for_input(self.enable_n2n):
                cluster.toggle_security_setting([cluster.get_master_node()],
                                                "n2n", "enable")

        if self.disable_n2n:
            for cluster in self.get_cluster_objects_for_input(
                    self.disable_n2n):
                cluster.toggle_security_setting([cluster.get_master_node()],
                                                "n2n")

        if self.enable_autofailover:
            for cluster in self.get_cluster_objects_for_input(
                    self.enable_autofailover):
                cluster.toggle_security_setting([cluster.get_master_node()],
                                                "autofailover", "enable")

        if self.disable_autofailover:
            for cluster in self.get_cluster_objects_for_input(
                    self.disable_autofailover):
                cluster.toggle_security_setting([cluster.get_master_node()],
                                                "autofailover")

        if random_setting:
            for cluster in self.get_cluster_objects_for_input(random_setting):
                setting = random.choice(list(self.settings_values_map.keys()))
                value = random.choice(self.settings_values_map.get(setting))
                cluster.toggle_security_setting([cluster.get_master_node()],
                                                setting, value)

        if multiple_ca:
            for cluster in self.get_cluster_objects_for_input(multiple_ca):
                master = cluster.get_master_node()
                ntonencryptionBase().disable_nton_cluster([master])
                CbServer.x509 = x509main(host=master)
                for server in cluster.get_nodes():
                    CbServer.x509.delete_inbox_folder_on_server(server=server)
                CbServer.x509.generate_multiple_x509_certs(
                    servers=cluster.get_nodes())
                if all_node_upload:
                    for node_num in range(len(cluster.get_nodes())):
                        CbServer.x509.upload_root_certs(
                            server=cluster.get_nodes()[node_num],
                            root_ca_names=[
                                CbServer.x509.root_ca_names[node_num]
                            ])
                else:
                    for server in cluster.get_nodes():
                        CbServer.x509.upload_root_certs(server)
                CbServer.x509.upload_node_certs(servers=cluster.get_nodes())
                if use_client_certs:
                    CbServer.x509.upload_client_cert_settings(server=master)
                    client_cert_path, client_key_path = CbServer.x509.get_client_cert(
                        int_ca_name=int_ca_name)
                    # Copy the certs onto the test machines
                    for server in cluster.get_nodes():
                        shell = RemoteMachineShellConnection(server)
                        shell.execute_command(
                            f"mkdir -p {os.path.dirname(client_cert_path)}")
                        shell.copy_file_local_to_remote(
                            client_cert_path, client_cert_path)
                        shell.execute_command(
                            f"mkdir -p {CbServer.x509.CACERTFILEPATH}all")
                        shell.copy_file_local_to_remote(
                            f"{CbServer.x509.CACERTFILEPATH}all/all_ca.pem",
                            f"{CbServer.x509.CACERTFILEPATH}all/all_ca.pem")
                        shell.disconnect()
                    self._client_cert = self._read_from_file(client_cert_path)
                    self._client_key = self._read_from_file(client_key_path)
                    self.add_built_in_server_user(node=master)
                ntonencryptionBase().setup_nton_cluster(
                    [master], clusterEncryptionLevel="strict")
            if rotate_certs:
                for cluster in self.get_cluster_objects_for_input(
                        rotate_certs):
                    CbServer.x509.rotate_certs(cluster.get_nodes())
            if delete_certs:
                for cluster in self.get_cluster_objects_for_input(
                        delete_certs):
                    for node in cluster.get_nodes():
                        CbServer.x509.delete_trusted_CAs(node)
            if restart_pkey_nodes:
                for cluster in self.get_cluster_objects_for_input(
                        restart_pkey_nodes):
                    for node in cluster.get_nodes():
                        shell = RemoteMachineShellConnection(node)
                        shell.restart_couchbase()
                        shell.disconnect()
                        time.sleep(10)
                        cluster.failover_and_rebalance_nodes()
                        cluster.add_back_node("delta")

        if self.apply_settings_before_setup:
            if initial_xdcr:
                self.load_and_setup_xdcr()
            else:
                self.setup_xdcr_and_load()

        if pause:
            for cluster in self.get_cluster_objects_for_input(pause):
                for remote_cluster_refs in cluster.get_remote_clusters():
                    remote_cluster_refs.pause_all_replications()
                    time.sleep(60)

        if rebalance_in:
            for cluster in self.get_cluster_objects_for_input(rebalance_in):
                cluster.rebalance_in()

        if failover:
            for cluster in self.get_cluster_objects_for_input(failover):
                cluster.failover_and_rebalance_nodes(graceful=graceful,
                                                     rebalance=True)

        if rebalance_out:
            for cluster in self.get_cluster_objects_for_input(rebalance_out):
                cluster.rebalance_out()

        if swap_rebalance:
            for cluster in self.get_cluster_objects_for_input(swap_rebalance):
                cluster.swap_rebalance()

        if pause:
            for cluster in self.get_cluster_objects_for_input(pause):
                for remote_cluster_refs in cluster.get_remote_clusters():
                    remote_cluster_refs.resume_all_replications()

        if reboot:
            for cluster in self.get_cluster_objects_for_input(reboot):
                cluster.warmup_node()
            time.sleep(60)

        self.perform_update_delete()
        self.verify_results()
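For reference, the multiple_ca branch above reduces to a short sequence: disable node-to-node encryption, generate per-node CAs, upload root and node certificates, then re-enable encryption at the strict level. A condensed, hedged sketch of that flow (it reuses only helpers already shown above and assumes a cluster object exposing get_master_node() and get_nodes()):

# Illustrative sketch only; mirrors the multiple_ca branch above.
def setup_multiple_ca(cluster):
    master = cluster.get_master_node()
    ntonencryptionBase().disable_nton_cluster([master])      # drop n2n encryption first
    x509 = x509main(host=master)
    for server in cluster.get_nodes():
        x509.delete_inbox_folder_on_server(server=server)    # start from a clean inbox
    x509.generate_multiple_x509_certs(servers=cluster.get_nodes())
    for server in cluster.get_nodes():
        x509.upload_root_certs(server)                       # trust the generated root CAs
    x509.upload_node_certs(servers=cluster.get_nodes())      # per-node certificates
    ntonencryptionBase().setup_nton_cluster(
        [master], clusterEncryptionLevel="strict")           # re-enable, now strict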
Code example #41
 def change_time_zone(self, server, timezone="UTC"):
     remote_client = RemoteMachineShellConnection(server)
     remote_client.execute_command("timedatectl set-timezone " + timezone)
     remote_client.disconnect()
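A minimal usage sketch for the helper above; the server list attribute and the timezone value are illustrative, not taken from the original test:

# Hypothetical call site: align every test node to one timezone before time-sensitive checks.
for server in self.servers:
    self.change_time_zone(server, timezone="UTC")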
Code example #42
File: eventing_base.py  Project: membase/testrunner
 def change_time_zone(self, server, timezone="UTC"):
     remote_client = RemoteMachineShellConnection(server)
     remote_client.execute_command("timedatectl set-timezone " + timezone)
     remote_client.disconnect()
Code example #43
File: tuq_tokens.py  Project: prasanna135/testrunner
    def test_tokens_secondary_indexes(self):
        server = self.master
        shell = RemoteMachineShellConnection(server)
        shell.execute_command(
            """curl -v -u Administrator:password \
                             -X POST http://{0}:8091/sampleBuckets/install \
                          -d '["{1}"]'""".format(
                server.ip, "beer-sample"
            )
        )
        self.sleep(30)

        shell.disconnect()
        # bucket_name = "beer-sample"
        self.query = "create primary index on `beer-sample`"
        self.run_cbq_query()
        self.query = "create index idx1 on `beer-sample`(description,name )"
        self.run_cbq_query()
        self.query = (
            "create index idx2 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description) END ,description,name )"
        )
        self.run_cbq_query()
        self.query = 'create index idx3 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"lower","names":true,"specials":false}) END ,description,name )'
        self.run_cbq_query()
        self.query = 'create index idx4 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"upper","names":false,"specials":true}) END ,description,name )'
        self.run_cbq_query()
        self.query = 'create index idx5 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"upper","names":false}) END ,description,name )'
        self.run_cbq_query()
        self.query = 'create index idx6 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"upper"}) END ,description,name )'
        self.run_cbq_query()
        self.query = "create index idx7 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{}) END ,description,name )"
        self.run_cbq_query()
        self.query = 'create index idx8 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"":""}) END ,description,name )'
        self.run_cbq_query()
        self.query = 'create index idx9 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"random"}) END ,description,name )'
        self.run_cbq_query()
        self.query = 'create index idx10 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"names":"random"}) END ,description,name )'
        self.run_cbq_query()
        self.query = 'create index idx11 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"specials":"random"}) END ,description,name )'
        self.run_cbq_query()
        self.query = "create index idx12 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description) END )"
        self.run_cbq_query()
        self.query = (
            'create index idx13 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"lower"}) END  )'
        )
        self.run_cbq_query()
        self.query = (
            'create index idx14 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"upper"}) END )'
        )
        self.run_cbq_query()
        self.query = 'create index idx15 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"lower","names":true,"specials":false}) END  )'
        self.run_cbq_query()
        self.query = 'create index idx16 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"upper","names":false,"specials":true}) END  )'
        self.run_cbq_query()
        self.query = 'create index idx17 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"upper","names":false}) END )'
        self.run_cbq_query()
        self.query = "create index idx18 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{}) END )"
        self.run_cbq_query()
        self.query = 'create index idx19 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"":""}) END  )'
        self.run_cbq_query()
        self.query = 'create index idx20 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"random"}) END  )'
        self.run_cbq_query()
        self.query = 'create index idx21 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"names":"random"}) END  )'
        self.run_cbq_query()
        self.query = 'create index idx22 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"specials":"random"}) END  )'
        self.run_cbq_query()

        self.query = 'explain select name from `beer-sample` where any v in tokens(description) satisfies v = "golden" END limit 10'
        actual_result = self.run_cbq_query()
        plan = ExplainPlanHelper(actual_result)

        self.assertTrue(actual_result["results"])
        self.assertTrue("covers" in str(plan))
        self.assertTrue(plan["~children"][0]["~children"][0]["scan"]["index"] == "idx2")

        self.assertTrue(
            str(plan["~children"][0]["~children"][0]["scan"]["covers"][0])
            == ("cover ((distinct (array `v` for `v` in tokens((`beer-sample`.`description`)) end)))")
        )

        self.query = 'select name from `beer-sample` use index(`#primary`) where any v in tokens(reverse(description)) satisfies v = "nedlog" END order by meta().id limit 10'
        expected_result = self.run_cbq_query()

        self.query = 'select name from `beer-sample` where any v in tokens(reverse(description)) satisfies v = "nedlog" END order by meta().id limit 10'
        actual_result = self.run_cbq_query()
        # self.assertTrue(str(actual_result['results'])=="[{u'name': u'21A IPA'}, {u'name': u'Amendment Pale Ale'}, {u'name': u'Double Trouble IPA'}, {u'name': u'South Park Blonde'}, {u'name': u'Restoration Pale Ale'}, {u'name': u'S.O.S'}, {u'name': u'Satsuma Harvest Wit'}, {u'name': u'Adnams Explorer'}, {u'name': u'Shock Top'}, {u'name': u'Anniversary Maibock'}]" )
        self.assertTrue((actual_result["results"]) == (expected_result["results"]))

        self.query = 'explain select name from `beer-sample` where any v in tokens(description,{"case":"lower","names":true,"specials":false}) satisfies v = "brewery" END limit 10'
        actual_result = self.run_cbq_query()
        plan = ExplainPlanHelper(actual_result)
        self.assertTrue("covers" in str(plan))
        self.assertTrue(
            str(plan["~children"][0]["~children"][0]["scan"]["covers"][0])
            == (
                'cover ((distinct (array `v` for `v` in tokens((`beer-sample`.`description`), {"case": "lower", "names": true, "specials": false}) end)))'
            )
        )
        self.assertTrue(plan["~children"][0]["~children"][0]["scan"]["index"] == "idx3")

        self.query = 'select name from `beer-sample` use index(`#primary`) where any v in tokens(description,{"case":"lower","names":true,"specials":false}) satisfies v = "brewery" END order by meta().id limit 10'

        expected_result = self.run_cbq_query()

        self.query = 'select name from `beer-sample` use index(`idx15`) where any v in tokens(description,{"case":"lower","names":true,"specials":false}) satisfies v = "brewery" END order by meta().id limit 10'
        actual_result = self.run_cbq_query()

        self.assertTrue((actual_result["results"]) == (expected_result["results"]))

        self.query = 'explain select name from `beer-sample` use index(`idx14`) where any v in tokens(description,{"case":"upper","names":false,"specials":true}) satisfies v = "BREWERY" END order by meta().id limit 10'
        actual_result = self.run_cbq_query()
        plan = ExplainPlanHelper(actual_result)
        self.assertTrue("covers" in str(plan))
        self.assertTrue(
            str(plan["~children"][0]["~children"][0]["scan"]["covers"][0])
            == (
                'cover ((distinct (array `v` for `v` in tokens((`beer-sample`.`description`), {"case": "upper", "names": false, "specials": true}) end)))'
            )
        )
        self.assertTrue(str(plan["~children"][0]["~children"][0]["scan"]["index"]) == "idx4")

        self.query = 'select name from `beer-sample` use index(`idx16`) where any v in tokens(description,{"case":"upper","names":false,"specials":true}) satisfies v = "BREWERY" END order by meta().id limit 10'
        actual_result = self.run_cbq_query()
        self.assertTrue((actual_result["results"]) == (expected_result["results"]))

        self.query = 'explain select name from `beer-sample` where any v in tokens(description,{"case":"upper","names":false}) satisfies v = "GOLDEN" END limit 10'
        actual_result = self.run_cbq_query()
        plan = ExplainPlanHelper(actual_result)
        self.assertTrue("covers" in str(plan))
        self.assertTrue(plan["~children"][0]["~children"][0]["scan"]["index"] == "idx5")

        self.query = 'select name from `beer-sample` use index(`idx17`) where any v in tokens(description,{"case":"upper","names":false}) satisfies v = "GOLDEN" END limit 10'
        actual_result = self.run_cbq_query()

        self.query = 'select name from `beer-sample` use index(`#primary`) where any v in tokens(description,{"case":"upper","names":false}) satisfies v = "GOLDEN" END limit 10'
        expected_result = self.run_cbq_query()
        self.assertTrue(actual_result["results"] == expected_result["results"])

        self.query = 'explain select name from `beer-sample` where any v in tokens(description,{}) satisfies  v = "golden" END limit 10'
        actual_result = self.run_cbq_query()
        plan = ExplainPlanHelper(actual_result)
        self.assertTrue("covers" in str(plan))
        self.assertTrue(plan["~children"][0]["~children"][0]["scan"]["index"] == "idx7")
        self.query = 'select name from `beer-sample` use index(`idx18`) where any v in tokens(description,{}) satisfies  v = "golden" END limit 10'
        actual_result = self.run_cbq_query()

        self.query = 'select name from `beer-sample` use index(`#primary`) where any v in tokens(description,{}) satisfies  v = "golden" END limit 10'
        expected_result = self.run_cbq_query()
        self.assertTrue(actual_result["results"] == expected_result["results"])

        self.query = 'explain select name from `beer-sample` where any v in tokens(description,{"":""}) satisfies v = "golden" END limit 10'
        actual_result = self.run_cbq_query()
        plan = ExplainPlanHelper(actual_result)
        self.assertTrue("covers" in str(plan))
        self.assertTrue(plan["~children"][0]["~children"][0]["scan"]["index"] == "idx8")

        self.query = 'select name from `beer-sample` use index(`idx19`)  where any v in tokens(description,{"":""}) satisfies v = "golden" END order by name '
        actual_result = self.run_cbq_query()
        self.query = 'select name from `beer-sample` use index(`#primary`)  where any v in tokens(description,{"":""}) satisfies v = "golden" END order by name '
        expected_result = self.run_cbq_query()
        self.assertTrue(actual_result["results"] == expected_result["results"])

        self.query = 'explain select name from `beer-sample` where any v in tokens(description,{"case":"random"}) satisfies  v = "golden"  END '
        actual_result = self.run_cbq_query()
        plan = ExplainPlanHelper(actual_result)
        self.assertTrue("covers" in str(plan))
        self.assertTrue(plan["~children"][0]["scan"]["index"] == "idx9")

        self.query = 'select name from `beer-sample` use index(`idx20`) where any v in tokens(description,{"case":"random"}) satisfies  v = "golden"  END order by name '
        actual_result = self.run_cbq_query()
        self.query = 'select name from `beer-sample` use index(`#primary`) where any v in tokens(description,{"case":"random"}) satisfies  v = "golden"  END  order by name '
        expected_result = self.run_cbq_query()
        self.assertTrue(actual_result["results"] == expected_result["results"])

        self.query = 'explain select name from `beer-sample` where any v in tokens(description,{"specials":"random"}) satisfies v = "brewery" END order by name'
        actual_result = self.run_cbq_query()
        plan = ExplainPlanHelper(actual_result)
        self.assertTrue("covers" in str(plan))
        self.assertTrue(plan["~children"][0]["~children"][0]["scan"]["index"] == "idx11")

        self.query = 'select name from `beer-sample` use index(`idx22`) where any v in tokens(description,{"specials":"random"}) satisfies  v = "golden"  END  order by name'
        actual_result = self.run_cbq_query()
        self.query = 'select name from `beer-sample` use index(`#primary`) where any v in tokens(description,{"specials":"random"}) satisfies  v = "golden"  END order by name'
        expected_result = self.run_cbq_query()
        self.assertTrue(actual_result["results"] == expected_result["results"])

        self.query = 'explain select name from `beer-sample` where any v in tokens(description,{"names":"random"}) satisfies v = "brewery" END limit 10'
        actual_result = self.run_cbq_query()
        plan = ExplainPlanHelper(actual_result)
        self.assertTrue("covers" in str(plan))
        self.assertTrue(plan["~children"][0]["~children"][0]["scan"]["index"] == "idx10")

        self.query = 'select name from `beer-sample` use index(`idx21`) where any v in tokens(description,{"names":"random"}) satisfies  v = "golden"  END  limit 10'
        actual_result = self.run_cbq_query()
        self.query = 'select name from `beer-sample` use index(`#primary`) where any v in tokens(description,{"names":"random"}) satisfies  v = "golden"  END limit 10'
        expected_result = self.run_cbq_query()
        self.assertTrue(actual_result["results"] == expected_result["results"])
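The test above repeats one pattern many times: build a DISTINCT ARRAY index over tokens(), EXPLAIN a matching query, and confirm the plan both covers the query and picks the expected index. A hedged helper sketch of that pattern, reusing run_cbq_query and ExplainPlanHelper from the example; the helper name is illustrative, and it assumes the query carries a LIMIT/ORDER BY so the scan sits two ~children levels deep, as in most of the assertions above:

 def assert_tokens_query_covered(self, query, expected_index):
     """Sketch: EXPLAIN a tokens() query, check it is covered and uses expected_index."""
     self.query = "explain " + query
     plan = ExplainPlanHelper(self.run_cbq_query())
     self.assertTrue("covers" in str(plan))
     scan = plan["~children"][0]["~children"][0]["scan"]
     self.assertTrue(scan["index"] == expected_index)

 # e.g. assert_tokens_query_covered(self,
 #     'select name from `beer-sample` where any v in tokens(description) satisfies v = "golden" END limit 10',
 #     "idx2")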
Code example #44
File: nru_moninor.py  Project: Boggypop/testrunner
class NRUMonitor(threading.Thread):

    CMD_NUM_RUNS = "/opt/couchbase/bin/cbstats localhost:11210 "\
                   "all | grep ep_num_access_scanner_runs"

    CMD_NUM_ITEMS = "/opt/couchbase/bin/cbstats localhost:11210 "\
                    "all | grep ep_access_scanner_num_items"

    CMD_RUNTIME = "/opt/couchbase/bin/cbstats localhost:11210 "\
                  "all | grep ep_access_scanner_last_runtime"

    def __init__(self, freq, reb_delay, eperf):
        self.freq = freq
        self.reb_delay = reb_delay
        self.eperf = eperf
        self.shell = None
        super(NRUMonitor, self).__init__()

    def run(self):
        print "[NRUMonitor] started running"

        # TODO: evaluate all servers, smarter polling freq
        server = self.eperf.input.servers[0]
        self.shell = RemoteMachineShellConnection(server)

        nru_num = self.nru_num = self.get_nru_num()
        if self.nru_num < 0:
            return

        while nru_num <= self.nru_num:
            print "[NRUMonitor] nru_num = %d, sleep for %d seconds"\
                  % (nru_num, self.freq)
            time.sleep(self.freq)
            nru_num = self.get_nru_num()
            if nru_num < 0:
                break

        gmt_now = time.strftime(PerfDefaults.strftime, time.gmtime())
        speed, num_items, run_time = self.get_nru_speed()

        print "[NRUMonitor] access scanner finished at: %s, speed: %s, "\
              "num_items: %s, run_time: %s"\
              % (gmt_now, speed, num_items, run_time)

        self.eperf.clear_hot_keys()

        print "[NRUMonitor] scheduled rebalance after %d seconds"\
              % self.reb_delay

        self.shell.disconnect()
        self.eperf.latched_rebalance(delay=self.reb_delay, sync=True)

        gmt_now = time.strftime(PerfDefaults.strftime, time.gmtime())
        print "[NRUMonitor] rebalance finished: %s" % gmt_now

        print "[NRUMonitor] stopped running"

    def get_nru_num(self):
        """Retrieve how many times nru access scanner has been run"""
        return self._get_shell_int(NRUMonitor.CMD_NUM_RUNS)

    def get_nru_speed(self):
        """Retrieve runtime and num_items for the last access scanner run
        Calculate access running speed

        @return (speed, num_items, run_time)
        """
        num_items = self._get_shell_int(NRUMonitor.CMD_NUM_ITEMS)

        if num_items <= 0:
            return -1, -1, -1

        run_time = self._get_shell_int(NRUMonitor.CMD_RUNTIME)

        if run_time <= 0:
            return -1, num_items, -1

        speed = num_items / run_time

        return speed, num_items, run_time

    def _get_shell_int(self, cmd):
        """Fire a shell command and return output as integer"""
        if not cmd:
            print "<_get_shell_int> invalid cmd"
            return -1

        output, error = self.shell.execute_command(cmd)

        if error:
            print "<_get_shell_int> unable to execute cmd '%s' from %s: %s"\
                  % (cmd, self.shell.ip, error)
            return -1

        if not output:
            print "<_get_shell_int> unable to execute cmd '%s' from %s: "\
                  "empty output" % (cmd, self.shell.ip)
            return -1

        try:
            num = int(output[0].split(":")[1])
        except (AttributeError, IndexError, ValueError), e:
            print "<_get_shell_int> unable to execute cmd '%s' from %s:"\
                  "output - %s, error - %s" % (cmd, self.shell.ip, output, e)
            return -1

        if num < 0:
            print "<_get_shell_int> invalid number: %d" % num
            return -1

        return num
Code example #45
class NRUMonitor(threading.Thread):

    CMD_NUM_RUNS = "/opt/couchbase/bin/cbstats localhost:11210 "\
                   "all | grep ep_num_access_scanner_runs"

    CMD_NUM_ITEMS = "/opt/couchbase/bin/cbstats localhost:11210 "\
                    "all | grep ep_access_scanner_num_items"

    CMD_RUNTIME = "/opt/couchbase/bin/cbstats localhost:11210 "\
                  "all | grep ep_access_scanner_last_runtime"

    def __init__(self, freq, reb_delay, eperf):
        self.freq = freq
        self.reb_delay = reb_delay
        self.eperf = eperf
        self.shell = None
        super(NRUMonitor, self).__init__()

    def run(self):
        print("[NRUMonitor] started running")

        # TODO: evaluate all servers, smarter polling freq
        server = self.eperf.input.servers[0]
        self.shell = RemoteMachineShellConnection(server)

        nru_num = self.nru_num = self.get_nru_num()
        if self.nru_num < 0:
            return

        while nru_num <= self.nru_num:
            print("[NRUMonitor] nru_num = %d, sleep for %d seconds"\
                  % (nru_num, self.freq))
            time.sleep(self.freq)
            nru_num = self.get_nru_num()
            if nru_num < 0:
                break

        gmt_now = time.strftime(PerfDefaults.strftime, time.gmtime())
        speed, num_items, run_time = self.get_nru_speed()

        print("[NRUMonitor] access scanner finished at: %s, speed: %s, "\
              "num_items: %s, run_time: %s"\
              % (gmt_now, speed, num_items, run_time))

        self.eperf.clear_hot_keys()

        print("[NRUMonitor] scheduled rebalance after %d seconds"\
              % self.reb_delay)

        self.shell.disconnect()
        self.eperf.latched_rebalance(delay=self.reb_delay, sync=True)

        gmt_now = time.strftime(PerfDefaults.strftime, time.gmtime())
        print("[NRUMonitor] rebalance finished: %s" % gmt_now)

        print("[NRUMonitor] stopped running")

    def get_nru_num(self):
        """Retrieve how many times nru access scanner has been run"""
        return self._get_shell_int(NRUMonitor.CMD_NUM_RUNS)

    def get_nru_speed(self):
        """Retrieve runtime and num_items for the last access scanner run
        Calculate access running speed

        @return (speed, num_items, run_time)
        """
        num_items = self._get_shell_int(NRUMonitor.CMD_NUM_ITEMS)

        if num_items <= 0:
            return -1, -1, -1

        run_time = self._get_shell_int(NRUMonitor.CMD_RUNTIME)

        if run_time <= 0:
            return -1, num_items, -1

        speed = num_items // run_time

        return speed, num_items, run_time

    def _get_shell_int(self, cmd):
        """Fire a shell command and return output as integer"""
        if not cmd:
            print("<_get_shell_int> invalid cmd")
            return -1

        output, error = self.shell.execute_command(cmd)

        if error:
            print("<_get_shell_int> unable to execute cmd '%s' from %s: %s"\
                  % (cmd, self.shell.ip, error))
            return -1

        if not output:
            print("<_get_shell_int> unable to execute cmd '%s' from %s: "\
                  "empty output" % (cmd, self.shell.ip))
            return -1

        try:
            num = int(output[0].split(":")[1])
        except (AttributeError, IndexError, ValueError) as e:
            print("<_get_shell_int> unable to execute cmd '%s' from %s:"\
                  "output - %s, error - %s" % (cmd, self.shell.ip, output, e))
            return -1

        if num < 0:
            print("<_get_shell_int> invalid number: %d" % num)
            return -1

        return num
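A hedged usage sketch for the monitor above; the eperf object is expected to provide input.servers, clear_hot_keys() and latched_rebalance(), exactly as run() uses them, and the frequency and delay values are illustrative:

# Illustrative only: poll every 30s, delay the rebalance by 60s after the scanner finishes.
monitor = NRUMonitor(freq=30, reb_delay=60, eperf=self)
monitor.start()   # runs in its own thread; run() returns after the synchronous rebalance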
Code example #46
File: stats.py  Project: mschoch/testrunner
 def stop_atop(self):
     """Stop atop collector"""
     for node in self.nodes:
         shell = RemoteMachineShellConnection(node)
         shell.execute_command("killall atop")
         # Close the SSH session once the collector has been killed
         shell.disconnect()
Code example #47
File: cbqe3043.py  Project: arod1987/testrunner
        for o, _ in opts:
            if o == "-h":
                usage()
 
            _input = TestInput.TestInputParser.get_test_input(sys.argv)
            if not _input.servers:
                usage("ERROR: no servers specified. Please use the -i parameter.")
    except IndexError:
        usage()
    except getopt.GetoptError, error:
        usage("ERROR: " + str(error))

    for server in _input.servers:
        shell = RemoteMachineShellConnection(server)
        command = "mount | grep '/cbqe3043'"
        output, error = shell.execute_command(command)
        if len(error) > 0:
            raise Exception("Unable to determine if a partition of 20MB already exists on " + server.ip)
        if len(output) > 0:
            print "/cbqe3043 partition is already mounted on " + server.ip
        if len(output) == 0:
            print "/cbqe3043 not mounted on " + server.ip
            print "creating /cbqe3043 on " + server.ip
            command = "rm -rf /cbqe3043; mkdir -p /cbqe3043"
            output, _ = shell.execute_command(command)
            if len(output) > 0:
                raise Exception("Unable to create directory /cbqe3043 on " + server.ip)
            print "creating /usr/disk-img on " + server.ip
            command = "rm -rf /usr/disk-img; mkdir -p /usr/disk-img"
            output, _ = shell.execute_command(command)
            if len(output) > 0:
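The snippet is truncated by the source page. For context, a self-contained, hedged sketch of the usual way such a small test partition is created on Linux (a file-backed loop mount run over the same shell object; the sizes and mkfs options are illustrative, not the original script's continuation):

# Illustrative only: back a ~20MB ext3 filesystem with a file and loop-mount it at /cbqe3043.
commands = [
    "dd if=/dev/zero of=/usr/disk-img/disk-quota.ext3 count=40960",  # 40960 * 512B = 20MB
    "mkfs -t ext3 -q -F /usr/disk-img/disk-quota.ext3",
    "mount -o loop,rw /usr/disk-img/disk-quota.ext3 /cbqe3043",
]
for cmd in commands:
    output, _ = shell.execute_command(cmd)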
Code example #48
File: testssl.py  Project: couchbase/testrunner
    def test_tls_ciphers_used(self):
        """
        Checks cipher-suites used is a subset of preconfigured list of cipher-suites.
        Checks for TLS 1.2 and TLS 1.3
        """
        for node in self.servers:
            self.log.info("Testing node {0}".format(node.ip))
            ports_to_scan = self.get_service_ports(node)
            ports_to_scan.extend(self.ports_to_scan)
            for node_port in ports_to_scan:
                self.log.info("Port being tested: {0}".format(node_port))
                cmd = self.testssl.TEST_SSL_FILENAME + " --warnings off --color 0 {0}:{1}" \
                    .format(node.ip, node_port)
                self.log.info("The command is {0}".format(cmd))
                shell = RemoteMachineShellConnection(self.slave_host)
                output, error = shell.execute_command(cmd)
                shell.disconnect()
                output = output.decode().split("\n")
                check_next = 0
                stmt = ""
                tls_1_dot_2_obtained_list = []
                tls_1_dot_3_obtained_list = []
                for line in output:
                    if check_next == 1:
                        if line == "":
                            check_next = 0
                            stmt = ""
                        elif "TLSv1.3 (" in line:
                            stmt = "TLSv1.3 ("
                        elif stmt == "TLSv1.2 (":
                            tls_1_dot_2_obtained_list.append(line.split()[-1])
                        elif stmt == "TLSv1.3 (":
                            tls_1_dot_3_obtained_list.append(line.split()[-1])
                    elif "TLSv1.2 (" in line:
                        check_next = 1
                        stmt = "TLSv1.2 ("

                # Get the preconfigured list of cipher-suites
                shell = RemoteMachineShellConnection(self.master)
                output, error = shell.execute_couchbase_cli(cli_command="setting-security",
                                                            options="--get",
                                                            cluster_host="localhost",
                                                            user="******",
                                                            password="******")
                shell.disconnect()
                content = json.loads(output[0])
                services_ports_map = {11207: "data", 18094: "fullTextSearch", 19102: "index",
                                      18096: "eventing", 18093: "query", 18095: "analytics",
                                      18097: "backup", 18091: "clusterManager",
                                      18092: "clusterManager"}
                cipher_order_list = content[services_ports_map[node_port]]["supportedCipherSuites"]

                # Verifies TLS 1.2 cipher-suites is a subset of preconfigured list of
                # cipher-suites
                is_present = False
                if all(ciphers in cipher_order_list for ciphers in tls_1_dot_2_obtained_list):
                    is_present = True
                self.assertTrue(is_present, msg="Obtained list of TLS 1.2 cipher-suites is not a "
                                                "subset of pre-configured list of cipher-suites on "
                                                "port: {0} :: service: {1}"
                                .format(node_port, services_ports_map[node_port]))

                # Verifies TLS 1.3 cipher-suites is a subset of preconfigured list of
                # cipher-suites
                is_present = False
                if all(ciphers in cipher_order_list for ciphers in tls_1_dot_3_obtained_list):
                    is_present = True
                self.assertTrue(is_present, msg="Obtained list of TLS 1.3 cipher-suites is not a "
                                                "subset of pre-configured list of cipher-suites on "
                                                "port: {0} :: service: {1}"
                                .format(node_port, services_ports_map[node_port]))
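The two subset checks at the end can be expressed more directly with sets. A hedged equivalent of those assertions, using the same variables as the loop body above (a sketch with identical semantics, not the test's original code):

# Equivalent subset check using sets (same semantics as the all(...) comprehensions above).
for tls_version, obtained in (("TLS 1.2", tls_1_dot_2_obtained_list),
                              ("TLS 1.3", tls_1_dot_3_obtained_list)):
    self.assertTrue(
        set(obtained).issubset(set(cipher_order_list)),
        msg="Obtained list of {0} cipher-suites is not a subset of the "
            "pre-configured cipher-suites on port {1} :: service {2}"
            .format(tls_version, node_port, services_ports_map[node_port]))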