def run(self):
    """Run self.command on self.server over SSH and print ip/stdout/stderr."""
    shell = RemoteMachineShellConnection(self.server)
    stdout, stderr = shell.execute_command(self.command)
    print(self.server.ip)
    print("\n".join(stdout))
    print("\n".join(stderr))
    shell.disconnect()
def setUp(self):
    """Test setup: disable auto-failover on all nodes, trust all CAs,
    load the travel-sample bucket and resolve the curl binary path on
    the master node.
    """
    super(EnforceTls, self).setUp()
    # Plain-HTTP service endpoints mapped to their TLS counterparts
    # (same endpoint, port shifted into the 18xxx/19xxx SSL range);
    # used by tests to verify only the https variants respond.
    self.sample_urls_map = \
        {"http://%s:8091/nodes/self": "https://%s:18091/nodes/self",
         "http://%s:9102/api/v1/stats": "https://%s:19102/api/v1/stats",
         "http://%s:8093/admin/clusters": "https://%s:18093/admin/clusters",
         "http://%s:8094/api/cfg": "https://%s:18094/api/cfg",
         "http://%s:8096/api/v1/functions": "https://%s:18096/api/v1/functions",
         "http://%s:8095/analytics/node/agg/stats/remaining":
             "https://%s:18095/analytics/node/agg/stats/remaining",
         "http://%s:8097/api/v1/config": "https://%s:18097/api/v1/config"}
    self.log.info("Disabling AF on all nodes before beginning the test")
    for node in self.cluster.servers:
        status = RestConnection(node)\
            .update_autofailover_settings(False, 120)
        self.assertTrue(status)
    self.log.info("Changing security settings to trust all CAs")
    trust_all_certs()
    self.bucket_util.load_sample_bucket(self.cluster, TravelSample())
    # Resolve the platform-specific curl path on the master
    shell = RemoteMachineShellConnection(self.cluster.master)
    self.curl_path = "/opt/couchbase/bin/curl"
    if shell.extract_remote_info().distribution_type == "windows":
        self.curl_path = "C:/Program Files/Couchbase/Server/bin/curl"
    shell.disconnect()
def _run_curl_command_from_localhost(self, cmd):
    """
    Method to run curl cmd get request from localhost
    by SSHing into self.server
    :cmd: curl command (get request)
    :returns: response body (content) as a list of lines, or None when
        no blank header/body separator line is found in the response
    :Raises exception if response code not in 200 series
    """
    shell = RemoteMachineShellConnection(self.server)
    output, error = shell.execute_command(cmd)
    shell.disconnect()
    if error:
        # Best-effort: stderr is logged but does not abort the parse
        self.log.error(
            "Error making Curl request on server {0} {1}".format(
                self.server.ip, error))
    # Walk the raw response lines: validate each HTTP status line, then
    # return everything after the first blank line (the body).
    # Fix: use enumerate instead of a hand-maintained index counter.
    for output_index, line in enumerate(output):
        if line.startswith("HTTP"):
            status_code_line = line.split(" ")
            status_code = status_code_line[1]
            status_msg = " ".join(status_code_line[2:])
            if status_code not in ['200', '201', '202']:
                raise Exception("Exception {0} {1}".format(
                    status_code, status_msg))
        elif not line.strip():
            return output[output_index + 1:]
def upload_x509_certs(self, cluster=None, servers=None, setup_once=False):
    """
    1. Uploads root certs and client-cert settings on servers
    2. Uploads node certs on servers

    :cluster: optional cluster object; when given its servers list
        overrides `servers`
    :servers: explicit list of servers to set up (used when no cluster)
    :setup_once: when True, run master setup once via cluster.x509
        instead of per-server
    """
    if cluster:
        servers = cluster.servers
    self.log.info("Uploading root cert to servers {0}".format(servers))
    if setup_once:
        cluster.x509.setup_master(self.client_cert_state, self.paths,
                                  self.prefixs, self.delimeters)
    else:
        # Fix: iterate the resolved `servers` list; the original looped
        # over cluster.servers, which raised AttributeError when only
        # `servers` was passed (cluster=None).
        for server in servers:
            x509main(server).setup_master(self.client_cert_state,
                                          self.paths, self.prefixs,
                                          self.delimeters)
    self.log.info(
        "Sleeping before uploading node certs to nodes {0}".format(
            servers))
    time.sleep(5)
    x509main().setup_cluster_nodes_ssl(servers, reload_cert=True)
    if cluster:
        copytree(cluster.x509.CACERTFILEPATH, cluster.CACERTFILEPATH)
        shell = RemoteMachineShellConnection(x509main.SLAVE_HOST)
        self.log.info("Removing folder {0} from slave".format(
            cluster.x509.CACERTFILEPATH))
        shell.execute_command("rm -rf " + cluster.x509.CACERTFILEPATH)
        # Fix: close the slave SSH connection (previously leaked)
        shell.disconnect()
def build_params(self, node):
    """Build and return (as a JSON string) the parameters used when
    uploading a node certificate/key, including the private-key
    passphrase section when encryption is enabled."""
    conn = RemoteMachineShellConnection(node)
    on_windows = conn.extract_remote_info().distribution_type == "windows"
    conn.disconnect()
    script_file = x509main.SCRIPTWINDOWSFILEPATH if on_windows \
        else x509main.SCRIPTFILEPATH
    params = dict()
    if self.encryption_type:
        passphrase = {"type": self.passphrase_type}
        if self.passphrase_type == "script":
            passphrase["path"] = (self.install_path +
                                  x509main.SCRIPTSPATH +
                                  script_file)
            passphrase["timeout"] = self.passphrase_load_timeout
            passphrase["trim"] = 'true'
            if self.passphrase_script_args:
                passphrase["args"] = self.passphrase_script_args
        elif self.passphrase_type == "rest":
            passphrase["url"] = self.passphrase_url
            passphrase["timeout"] = self.passphrase_load_timeout
            passphrase["httpsOpts"] = self.https_opts
        else:
            # Fall back to a plain-text per-node passphrase
            passphrase["type"] = "plain"
            passphrase["password"] = \
                self.private_key_passphrase_map[str(node.ip)]
        params["privateKeyPassphrase"] = passphrase
    return json.dumps(params)
def run(self):
    """Copy a script to the remote server, execute it, delete it, and
    print the server ip along with stdout/stderr."""
    remote_client = RemoteMachineShellConnection(self.server)
    remote_client.create_file(self.script_name, self.script_content)
    output, error = remote_client.execute_command(
        "chmod 777 {0} ; {0} ; rm -f {0}".format(self.script_name))
    # Fix: py3 print() calls (file's sibling run() already uses them)
    print(self.server.ip)
    print("\n".join(output))
    print("\n".join(error))
    # Fix: close the SSH connection (previously leaked)
    remote_client.disconnect()
def set_n2n_encryption_level_on_nodes(self, nodes, level="control"):
    """Enable node-to-node encryption and apply `level` on each node
    via the Couchbase CLI."""
    self.log.info("Enabling n2n encryption and setting level to "
                  "{0} on nodes {1}".format(level, nodes))
    for target in nodes:
        conn = RemoteMachineShellConnection(target)
        cli = CbCli(conn, no_ssl_verify=True)
        cli.enable_n2n_encryption()
        cli.set_n2n_encryption_level(level=level)
        conn.disconnect()
def disable_n2n_encryption_cli_on_nodes(self, nodes):
    """Disable node-to-node encryption on each node; the level is first
    lowered to 'control' so disabling is permitted."""
    self.set_n2n_encryption_level_on_nodes(nodes=nodes, level="control")
    self.log.info("Disabling n2n encryption on nodes {0}".format(nodes))
    for target in nodes:
        conn = RemoteMachineShellConnection(target)
        cli = CbCli(conn, no_ssl_verify=True)
        result = cli.disable_n2n_encryption()
        self.log.info(result)
        conn.disconnect()
def _get_install_path(self, host):
    """Return the Couchbase install path on `host`, chosen by its OS
    (windows / Mac / linux default)."""
    shell = RemoteMachineShellConnection(host)
    os_type = shell.extract_remote_info().distribution_type
    # Fix: close the SSH connection (previously leaked)
    shell.disconnect()
    self.log.info("OS type is {0}".format(os_type))
    if os_type == 'windows':
        install_path = x509main.WININSTALLPATH
    elif os_type == 'Mac':
        install_path = x509main.MACINSTALLPATH
    else:
        install_path = x509main.LININSTALLPATH
    return install_path
def teardown_x509_certs(self):
    """
    1. Regenerates root cert and removes node certs and
       client cert settings from server
    2. Removes certs folder from slave
    """
    self._reset_original()
    shell = RemoteMachineShellConnection(x509main.SLAVE_HOST)
    # Fix: the log line was duplicated in the original
    self.log.info("Removing folder {0} from slave".format(
        x509main.CACERTFILEPATH))
    shell.execute_command("rm -rf " + x509main.CACERTFILEPATH)
    # Fix: close the SSH connection (previously leaked)
    shell.disconnect()
def kill_indexer(self, server, timeout=10, kill_sleep_time=20):
    """Repeatedly kill the indexer process on `server` until
    self.stop_killIndexer is set externally or `timeout` iterations
    have run, sleeping `kill_sleep_time` seconds between kills."""
    self.stop_killIndexer = False
    counter = 0
    while not self.stop_killIndexer:
        counter += 1
        # Fix: check the iteration budget BEFORE opening the shell; the
        # original connected first and leaked that connection on break.
        if counter > timeout:
            break
        indexerkill_shell = RemoteMachineShellConnection(server)
        output, error = indexerkill_shell.kill_indexer()
        # output, error = remote_client.execute_command("pkill -f indexer")
        self.log.info("Output value is:" + str(output))
        self.log.info("Counter value is {0} and max count is {1}".format(
            str(counter), str(timeout)))
        indexerkill_shell.disconnect()
        self.sleep(kill_sleep_time, "Waiting for indexer to warm up")
    self.log.info("Kill indexer process for node: {} completed".format(
        str(server.ip)))
def get_latest_tombstones_purged_count(self, nodes=None):
    """
    grep debug log for the latest tombstones purged count
    Return dict with key = node_ip and value = ts purged count as string
    (0 for a node when the log cannot be read/parsed)
    """
    ts_purged_count_dict = dict()
    if nodes is None:
        nodes = self.cluster_util.get_nodes_in_cluster(self.cluster)
    for node in nodes:
        shell = RemoteMachineShellConnection(node)
        command = "grep 'tombstone_agent:purge:' /opt/couchbase/var/lib/couchbase/logs/debug.log | tail -1"
        output, _ = shell.execute_command(command)
        try:
            if len(output) == 0:
                self.log.info(
                    "Debug.log must have got rotated; trying to find the latest gz file"
                )
                command = "find /opt/couchbase/var/lib/couchbase/logs -name 'debug.log.*.gz' -print0 " \
                          "| xargs -r -0 ls -1 -t | tail -1"
                output, _ = shell.execute_command(command)
                log_file = output[0]
                command = "zgrep 'tombstone_agent:purge:' %s | tail -1" % log_file
                output, _ = shell.execute_command(command)
            self.log.info("On {0} {1}".format(node.ip, output))
            # e.g. "... Purged 42 ..." -> "42"
            purged_count = re.findall("Purged [0-9]+",
                                      output[0])[0].split(" ")[1]
            ts_purged_count_dict[node.ip] = purged_count
        except Exception as e:
            # Fix: use the test logger instead of bare print()
            self.log.error(e)
            ts_purged_count_dict[node.ip] = 0
        finally:
            # Fix: always disconnect, even when parsing raises; the
            # original skipped disconnect on early exceptions.
            shell.disconnect()
    return ts_purged_count_dict
def setUp(self):
    """Base setup for cgroup tests: read test input params, open an SSH
    shell to the target VM and (unless skipped) start docker, launch a
    resource-limited Couchbase container and initialize the node.
    """
    self.input = TestInputSingleton.input
    self.log_level = self.input.param("log_level", "info").upper()
    self.log = logger.get("test")
    self.infra_log = logger.get("infra")
    self.infra_log_level = self.input.param("infra_log_level",
                                            "error").upper()
    self.log.setLevel(self.log_level)
    self.infra_log.setLevel(self.infra_log_level)
    self.servers = self.input.servers
    # First configured server is the docker-host VM under test
    self.vm_ip = self.servers[0].ip
    self.rest_port = self.servers[0].port
    self.ssh_username = self.servers[0].ssh_username
    self.ssh_password = "******"
    self.node = ServerInfo(ip=self.vm_ip,
                           port=self.rest_port,
                           ssh_username=self.ssh_username,
                           ssh_password=self.ssh_password,
                           memcached_port=CbServer.memcached_port)
    self.shell = RemoteMachineShellConnection(self.node)
    self.skip_setup_teardown = self.input.param("skip_setup_teardown",
                                                None)
    self.skip_teardown = self.input.param("skip_teardown", None)
    self.cb_cpu_count_env = self.input.param(
        "cb_cpu_count_env", None)  # COUCHBASE_CPU_COUNT env
    self.cpus = self.input.param("cpus", 2)  # cpus limit for the container
    self.mem = self.input.param(
        "mem", 1073741824)  # mem limit for the container in bytes
    # Pass service_mem_alloc as
    # service_name1:memory_to_be_allocated-service_name2:memory_to_be_allocated
    service_mem_alloc = self.input.param("service_mem_alloc", None)
    self.service_and_memory_allocation = dict()
    if service_mem_alloc:
        # "-" separates services; ":" separates a name from its quota
        temp = service_mem_alloc.split("-")
        for service in temp:
            temp2 = service.split(":")
            self.service_and_memory_allocation[temp2[0]] = temp2[1]
    if self.skip_setup_teardown is None:
        self.start_docker()
        self.remove_all_containers()
        self.start_couchbase_container(mem=self.mem, cpus=self.cpus)
        self.set_disk_paths()
        self.initialize_node()
    self.log.info("Finished CGroupBase")
def tearDown(self):
    """Log each bucket's active resident ratio (DGM %) before running
    the parent teardown."""
    shell = RemoteMachineShellConnection(self.cluster.master)
    cbstat_obj = Cbstats(shell)
    for bucket in self.bucket_util.buckets:
        result = cbstat_obj.all_stats(
            bucket.name, field_to_grep="vb_active_perc_mem_resident")
        self.log.info("Bucket: %s, Resident ratio(DGM): %s%%"
                      % (bucket.name, result))
    # Fix: close the SSH connection (previously leaked)
    shell.disconnect()
    super(CollectionsDgmSteady, self).tearDown()
def wipe_config_on_removed_nodes(self, removed_nodes=None):
    """Stop Couchbase on each failed-over/removed node, wipe its config
    directory and restart the server, logging if ns_server fails to
    come back up."""
    nodes = self.server_to_fail if removed_nodes is None else removed_nodes
    for node in nodes:
        self.log.info(
            "Wiping node config and restarting server on {0}".format(node))
        rest = RestConnection(node)
        shell = RemoteMachineShellConnection(node)
        shell.stop_couchbase()
        self.sleep(10)
        shell.cleanup_data_config(self.data_path)
        shell.start_server()
        self.sleep(10)
        if not rest.is_ns_server_running():
            self.log.error("ns_server {0} is not running.".format(node.ip))
        shell.disconnect()
def _reset_original(self):
    """
    1. Regenerates root cert on all servers
    2. Removes inbox folder (node certs) from all VMs
    """
    self.log.info("Reverting to original state - regenerating certificate "
                  "and removing inbox folder")
    tmp_path = "/tmp/abcd.pem"
    # Fix: loop variable renamed from the misleading plural `servers`
    for server in self.cluster.servers:
        cli_command = "ssl-manage"
        remote_client = RemoteMachineShellConnection(server)
        options = "--regenerate-cert={0}".format(tmp_path)
        output, error = remote_client.execute_couchbase_cli(
            cli_command=cli_command, options=options,
            cluster_host=server.ip, user="******", password="******")
        # Fix: close the SSH connection (previously leaked)
        remote_client.disconnect()
        x509main(server)._delete_inbox_folder()
def __init__(self, server):
    """Stats REST helper: pre-computes per-service base URLs, switching
    every service port to its SSL-mapped equivalent when the cluster is
    running over https.
    """
    super(StatsHelper, self).__init__(server)
    self.server = server
    protocol = "https" if CbServer.use_https else "http"
    # For each service: use the mapped SSL port when https is enabled,
    # otherwise the plain port.
    rest_port = CbServer.ssl_port_map.get(
        str(CbServer.port),
        CbServer.port) if CbServer.use_https else CbServer.port
    fts_port = CbServer.ssl_port_map.get(str(CbServer.fts_port),
                                         CbServer.fts_port) \
        if CbServer.use_https else CbServer.fts_port
    n1ql_port = CbServer.ssl_port_map.get(str(CbServer.n1ql_port),
                                          CbServer.n1ql_port) \
        if CbServer.use_https else CbServer.n1ql_port
    cbas_port = CbServer.ssl_port_map.get(str(CbServer.cbas_port),
                                          CbServer.cbas_port) \
        if CbServer.use_https else CbServer.cbas_port
    eventing_port = CbServer.ssl_port_map.get(str(CbServer.eventing_port),
                                              CbServer.eventing_port) \
        if CbServer.use_https else CbServer.eventing_port
    index_port = CbServer.ssl_port_map.get(str(CbServer.index_port),
                                           CbServer.index_port) \
        if CbServer.use_https else CbServer.index_port
    self.base_url = "{0}://{1}:{2}".format(protocol, self.ip, rest_port)
    self.fts_base_url = "{0}://{1}:{2}".format(protocol, self.ip, fts_port)
    self.n1ql_base_url = "{0}://{1}:{2}".format(protocol, self.ip,
                                                n1ql_port)
    self.cbas_base_url = "{0}://{1}:{2}".format(protocol, self.ip,
                                                cbas_port)
    self.eventing_base_url = "{0}://{1}:{2}".format(
        protocol, self.ip, eventing_port)
    self.index_base_url = "{0}://{1}:{2}".format(protocol, self.ip,
                                                 index_port)
    self.memcached_ssl_base_url = "http://{0}:{1}".format(self.ip, 11207)
    # Prometheus scrapes KV metrics from this port, and not 11210.
    # Look at: /opt/couchbase/var/lib/couchbase/config/prometheus.yaml
    # for ports
    self.memcached_base_url = "http://{0}:{1}".format(self.ip, 11280)
    self.prometheus_base_url = "http://{0}:{1}".format(self.ip, 9123)
    self.rest = RestClientConnection(server)
    self.curl_path = "curl"
    # NOTE(review): `type` shadows the builtin; left unchanged here
    shell = RemoteMachineShellConnection(self.server)
    type = shell.extract_remote_info().distribution_type
    if type.lower() == 'windows':
        self.path = Windows.COUCHBASE_BIN_PATH
        self.curl_path = "%scurl" % self.path
    shell.disconnect()
def reset_cluster(self):
    """Stop every node in the cluster, wipe its data/config path and
    restart it; exceptions are logged, not raised."""
    try:
        for node in self.cluster.servers:
            shell = RemoteMachineShellConnection(node)
            # Start node
            rest = RestConnection(node)
            data_path = rest.get_data_path()
            # Stop node
            shell.stop_server()
            # Delete Path
            shell.cleanup_data_config(data_path)
            shell.start_server()
            # If Ipv6 update dist_cfg file post server restart to change
            # distribution to IPv6
            if '.com' in node.ip or ':' in node.ip:
                self.log.info("Updating dist_cfg for IPv6 Machines")
                shell.update_dist_type()
            # Fix: close the SSH connection (previously leaked)
            shell.disconnect()
            self.sleep(10)
    except Exception as ex:
        # Fix: py3-compatible except syntax (was `except Exception, ex`)
        self.log.info(ex)
def custom_induce_failure(self, nodes=None):
    """Induce the failure configured in self.failover_action on each
    node (defaults to self.server_to_fail)."""
    if nodes is None:
        nodes = self.server_to_fail
    for node in nodes:
        action = self.failover_action
        if action == "stop_server":
            self.cluster_util.stop_server(self.cluster, node)
        elif action == "firewall":
            self.cluster_util.start_firewall_on_node(self.cluster, node)
        elif action == "stop_memcached":
            self.cluster_util.stop_memcached_on_node(self.cluster, node)
        elif action == "kill_erlang":
            remote = RemoteMachineShellConnection(node)
            remote.info = remote.extract_remote_info()
            target_os = "windows" \
                if remote.info.type.lower() == "windows" else "unix"
            remote.kill_erlang(os=target_os)
            remote.disconnect()
def setUp(self):
    """Setup for rebalance start/stop tests: compute the in/out server
    lists, build the update doc generator and enable diag/eval on
    non-local hosts."""
    super(RebalanceStartStopTests, self).setUp()
    extra_nodes_in = self.input.param("extra_nodes_in", 0)
    extra_nodes_out = self.input.param("extra_nodes_out", 0)
    self.servs_init = self.servers[:self.nodes_init]
    self.servs_in = [self.servers[i + self.nodes_init]
                     for i in range(self.nodes_in)]
    self.servs_out = [self.servers[self.nodes_init - i - 1]
                      for i in range(self.nodes_out)]
    self.extra_servs_in = [
        self.servers[i + self.nodes_init + self.nodes_in]
        for i in range(extra_nodes_in)]
    self.extra_servs_out = [
        self.servers[self.nodes_init - i - 1 - self.nodes_out]
        for i in range(extra_nodes_out)]
    self.withMutationOps = self.input.param("withMutationOps", True)
    self.sleep_before_rebalance = self.input.param(
        "sleep_before_rebalance", 0)
    if self.spec_name is not None:
        self.num_items = 20000
        self.items = 20000
        init_doc_load_spec = \
            self.bucket_util.get_crud_template_from_package("initial_load")
        # Using the same key as defined in the loading spec.
        # Fix: integer division — `self.items / 2` is a float on py3,
        # and the generator range bound should be an int.
        self.gen_update = doc_generator(
            init_doc_load_spec["doc_crud"][
                MetaCrudParams.DocCrud.COMMON_DOC_KEY],
            0, (self.items // 2),
            mutation_type="SET")
    shell = RemoteMachineShellConnection(self.cluster.master)
    shell.enable_diag_eval_on_non_local_hosts()
    shell.disconnect()
def block_traffic_between_two_nodes(self, node1, node2):
    """Add an iptables DROP rule on node1 so traffic from node2 is
    discarded."""
    conn = RemoteMachineShellConnection(node1)
    self.log.info("Blocking traffic from {0} in {1}".format(
        node2.ip, node1.ip))
    conn.execute_command(
        "iptables -A INPUT -s {0} -j DROP".format(node2.ip))
    conn.disconnect()
def setUp(self):
    """Volume test setup: prepare available/in-cluster node lists, read
    quota and purge-age params, enable diag/eval and build index/FTS
    state.
    """
    self.input = TestInputSingleton.input
    self.input.test_params.update({"default_bucket": False})
    super(volume, self).setUp()
    self.bucket_util._expiry_pager(self.cluster, val=5)
    self.rest = RestConnection(self.servers[0])
    self.available_servers = list()
    self.available_servers = self.cluster.servers[self.nodes_init:]
    self.nodes_in_cluster = self.cluster.servers[:self.nodes_init]
    # Master is excluded from failover/removal operations
    self.exclude_nodes = [self.cluster.master]
    self.skip_check_logs = False
    self.iterations = self.input.param("iterations", 1)
    self.retry_get_process_num = self.input.param("retry_get_process_num",
                                                  400)
    # Service memory quotas — presumably MB; confirm against caller
    self.kv_mem_quota = self.input.param("kv_mem_quota", 22000)
    self.index_mem_quota = self.input.param("index_mem_quota", 22700)
    self.skip_index_creation_in_setup = self.input.param(
        "skip_index_creation_in_setup", False)
    # Tombstone purge age — presumably seconds; confirm
    self.tombstone_purge_age = self.input.param("tombstone_purge_age",
                                                300)
    shell = RemoteMachineShellConnection(self.cluster.master)
    shell.enable_diag_eval_on_non_local_hosts()
    shell.disconnect()
    self.rest.update_tombstone_purge_age_for_removal(
        self.tombstone_purge_age)
    self.index_setup()
    self.fts_setup()
def split_brain(self, node1, node2):
    """Create a one-way network partition: node1 drops all incoming
    packets from node2 via an iptables DROP rule."""
    conn = RemoteMachineShellConnection(node1)
    self.log.debug("Blocking traffic from {0} in {1}".format(
        node2.ip, node1.ip))
    conn.execute_command(
        "iptables -A INPUT -s {0} -j DROP".format(node2.ip))
    conn.disconnect()
def test_security_config_changed_event(self):
    """Verify that changing the TLS minimum version and the n2n
    encryption level produces a security-config-changed system event
    whose old/new settings match what was applied.
    """
    # Auto-failover must be off before changing the encryption level
    _ = self.rest.update_autofailover_settings(False, 120)
    shell_conn = RemoteMachineShellConnection(self.cluster.master)
    cb_cli = CbCli(shell_conn)
    o = cb_cli.enable_n2n_encryption()
    self.log.info(o)
    shell_conn.disconnect()
    self.rest.set_encryption_level(level="control")
    # Apply the new security settings under test
    settings = {"tlsMinVersion": "tlsv1.1",
                "clusterEncryptionLevel": "all"}
    self.rest.set_security_settings(settings)
    old_settings = {"ssl_minimum_protocol": "tlsv1.2",
                    "cluster_encryption_level": "control"}
    new_settings = {"ssl_minimum_protocol": "tlsv1.1",
                    "cluster_encryption_level": "all"}
    # Get the last event
    event = self.get_event_from_cluster()
    # Expected event payload built locally for comparison
    user_event = SecurityEvents.security_config_changed(
        self.cluster.master.ip, old_settings, new_settings)
    # Test NON Extra Attributes fields & NON generic fields
    for param, value in user_event.items():
        if param == Event.Fields.EXTRA_ATTRS:
            continue
        if event[param] != value:
            self.fail("Value mismatch for '%s'. Expected %s != %s Actual"
                      % (param, value, event[param]))
    # Test generic fields
    self.generic_fields_check(event)
    # Test Extra Attributes fields
    for param in ["old_settings", "new_settings"]:
        expected_settings = user_event[Event.Fields.EXTRA_ATTRS][param]
        actual_settings = event[Event.Fields.EXTRA_ATTRS][param]
        for i_param in ["ssl_minimum_protocol",
                        "cluster_encryption_level"]:
            act_val = actual_settings[i_param]
            exp_val = expected_settings[i_param]
            if act_val != exp_val:
                self.fail("Mismatch in %s. Expected %s != %s Actual"
                          % (param, exp_val, act_val))
def delete_scripts_folder_on_server(self, server=None):
    """Remove the passphrase-scripts folder from `server` (defaults to
    self.host)."""
    if server is None:
        server = self.host
    scripts_dir = self.install_path + x509main.SCRIPTSPATH
    conn = RemoteMachineShellConnection(server)
    conn.execute_command("rm -rf " + scripts_dir)
    conn.disconnect()
def create_scripts_folder_on_server(self, server=None):
    """Create the passphrase-scripts folder on `server` (defaults to
    self.host)."""
    if server is None:
        server = self.host
    scripts_dir = self.install_path + x509main.SCRIPTSPATH
    conn = RemoteMachineShellConnection(server)
    conn.create_directory(scripts_dir)
    conn.disconnect()
def generate_root_certificate(self, root_ca_name, cn_name=None):
    """Generate a root CA (key + self-signed cert) on the slave host.

    Creates <CACERTFILEPATH>/<root_ca_name>/ca.key and ca.pem, bumps
    the CA counter and records the new CA in self.manifest.
    :root_ca_name: folder name for the CA (also the default CN)
    :cn_name: optional CN override for the certificate subject
    """
    root_ca_dir = x509main.CACERTFILEPATH + root_ca_name + "/"
    self.create_directory(root_ca_dir)
    root_ca_key_path = root_ca_dir + "ca.key"
    root_ca_path = root_ca_dir + "ca.pem"
    config_path = x509main.ROOT_CA_CONFIG
    shell = RemoteMachineShellConnection(self.slave_host)
    # create ca.key (RSA, self.key_length bits)
    output, error = shell.execute_command("openssl genrsa " +
                                          " -out " + root_ca_key_path +
                                          " " + str(self.key_length))
    self.log.info('Output message is {0} and error message is {1}'.format(output, error))
    if cn_name is None:
        cn_name = root_ca_name
    # create ca.pem (self-signed, 10-year validity, SHA-256)
    output, error = shell.execute_command("openssl req -config " + config_path +
                                          " -new -x509 -days 3650" +
                                          " -sha256 -key " + root_ca_key_path +
                                          " -out " + root_ca_path +
                                          " -subj '/C=UA/O=MyCompany/CN=" + cn_name + "'")
    self.log.info('Output message is {0} and error message is {1}'.format(output, error))
    # Register the new CA in bookkeeping structures
    self.ca_count += 1
    self.root_ca_names.append(root_ca_name)
    self.manifest[root_ca_name] = dict()
    self.manifest[root_ca_name]["path"] = root_ca_dir
    self.manifest[root_ca_name]["intermediate"] = dict()
    shell.disconnect()
def create_CA_folder_on_server(self, server=None):
    """Create the trusted-CA folder (inbox/CA) on `server` (defaults to
    self.host)."""
    if server is None:
        server = self.host
    ca_dir = (self.install_path + x509main.CHAINFILEPATH
              + "/" + x509main.TRUSTEDCAPATH)
    conn = RemoteMachineShellConnection(server)
    conn.create_directory(ca_dir)
    conn.disconnect()
def tearDown(self):
    """Flush iptables rules on every initial node (undoing any network
    split), then run the parent teardown."""
    for node in self.cluster.servers[:self.nodes_init]:
        conn = RemoteMachineShellConnection(node)
        conn.execute_command("/sbin/iptables -F")
        conn.disconnect()
    self.sleep(10)
    super(CollectionsNetworkSplit, self).tearDown()
def get_node_private_key_passphrase_script(self, server):
    """
    Given a server object, returns the path (on the slave) of the bash
    script that prints the private-key passphrase for that node
    """
    conn = RemoteMachineShellConnection(server)
    on_windows = conn.extract_remote_info().distribution_type == "windows"
    conn.disconnect()
    base_path = self.node_ca_map[str(server.ip)]["path"]
    script_name = x509main.SCRIPTWINDOWSFILEPATH if on_windows \
        else x509main.SCRIPTFILEPATH
    return base_path + script_name