def fetch_sync_gateway_logs(cluster_config, prefix):
    """Pull sync_gateway / sg_accel logs from the cluster, zip them, and copy
    the timestamped archive into the results directory.

    Returns the path of the zip copied under RESULTS_DIR/logs.
    Raises CollectionError if the playbook fails or no logs were pulled.
    """
    ansible_runner = AnsibleRunner(cluster_config)
    log_info("Pulling sync_gateway / sg_accel logs")

    # Fetch logs from sync_gateway instances
    status = ansible_runner.run_ansible_playbook("fetch-sync-gateway-logs.yml")
    if status != 0:
        raise CollectionError("Could not pull logs")

    # The playbook is expected to drop the logs in /tmp/sg_logs
    if not os.path.isdir("/tmp/sg_logs"):
        raise CollectionError("Error finding pulled logs at /tmp/sg_logs")

    # Zip logs with a timestamp and remove the raw log directory
    date_time = time.strftime("%Y-%m-%d-%H-%M-%S")
    temp_log_path = "/tmp/{}-{}-sglogs".format(prefix, date_time)
    shutil.make_archive(temp_log_path, "zip", "/tmp/sg_logs")
    shutil.rmtree("/tmp/sg_logs")

    # Copy the archive into the results directory
    zip_file_path = "{}.zip".format(temp_log_path)
    log_results_location = "{}/logs".format(RESULTS_DIR)
    shutil.copy(zip_file_path, log_results_location)

    zip_name = "{}-{}-sglogs.zip".format(prefix, date_time)
    result_zip = "{}/{}".format(log_results_location, zip_name)
    log_info("sync_gateway logs copied to {}".format(result_zip))
    return result_zip
def install_couchbase_server(cluster_config, couchbase_server_config, cbs_platform="centos7"):
    """Install Couchbase Server on the cluster and wait until it is healthy.

    :param cluster_config: path to the cluster configuration
    :param couchbase_server_config: object that resolves the package base url / name
    :param cbs_platform: target platform string used to pick the package (default "centos7")
    :raises ProvisioningError: if the install playbook fails
    """
    log_info(cluster_config)
    log_info(couchbase_server_config)

    ansible_runner = AnsibleRunner(cluster_config)

    cluster_keywords = ClusterKeywords()
    cluster_topology = cluster_keywords.get_cluster_topology(cluster_config)
    # First couchbase server in the topology is used to resolve the package
    server_url = cluster_topology["couchbase_servers"][0]
    cb_server = CouchbaseServer(server_url)

    log_info(">>> Installing Couchbase Server")
    # Install Server
    server_baseurl, server_package_name = couchbase_server_config.get_baseurl_package(
        cb_server, cbs_platform)
    status = ansible_runner.run_ansible_playbook(
        "install-couchbase-server-package.yml",
        extra_vars={
            "couchbase_server_package_base_url": server_baseurl,
            "couchbase_server_package_name": server_package_name
        })
    if status != 0:
        raise ProvisioningError("Failed to install Couchbase Server")

    # Wait for server to be in 'healthy state'
    # CONSISTENCY FIX: use log_info like the rest of this module instead of print
    log_info(">>> Waiting for server to be in 'healthy' state")
    cb_server.wait_for_ready_state()
def push_cbcollect_info_supportal(cluster_config):
    """
    1. Runs cbcollect_info on one of the couchbase server nodes
    2. Pushes to supportal.couchbase.com
    """
    runner = AnsibleRunner(config=cluster_config)
    playbook_status = runner.run_ansible_playbook("push-cbcollect-info-supportal.yml")
    assert playbook_status == 0, "Failed to push cbcollect info"
def upgrade_server(self, cluster_config, server_version_build, cbs_platform, target=None, toy_build=None):
    """Upgrade Couchbase Server on one node (``target``) or on all nodes.

    :param cluster_config: path to the cluster configuration
    :param server_version_build: "version" or "version-build" string
    :param cbs_platform: platform string used to resolve the package
    :param target: optional node url; when given only that node is upgraded
    :param toy_build: optional full url to a toy build rpm; overrides version resolution
    :raises ProvisioningError: if the upgrade playbook fails
    """
    ansible_runner = AnsibleRunner(cluster_config)
    log_info(">>> Upgrading Couchbase Server")

    if toy_build:
        # e.g. http://server.jenkins.couchbase.com/view/All/job/watson-toy/1770/artifact/couchbase-server-enterprise-5.0.0-9900-centos7.x86_64.rpm
        # The last path component is the package name, the rest is the base url.
        server_baseurl, _, server_package_name = toy_build.rpartition('/')
    else:
        # FIX: renamed misspelled local `server_verion`; collapsed the redundant
        # server_build-is-None two-step branch into a single length check.
        version_build = server_version_build.split("-")
        server_version = version_build[0]
        if len(version_build) == 2:
            # Build number is included -> resolve from the NAS
            server_baseurl, server_package_name = self.resolve_cb_nas_url(
                server_version, version_build[1], cbs_platform)
        else:
            server_baseurl, server_package_name = self.resolve_cb_mobile_url(
                server_version, cbs_platform)

    if target is not None:
        target = hostname_for_url(cluster_config, target)
        log_info("Upgrading Couchbase server on {} ...".format(target))
        status = ansible_runner.run_ansible_playbook(
            "upgrade-couchbase-server-package.yml",
            subset=target,
            extra_vars={
                "couchbase_server_package_base_url": server_baseurl,
                "couchbase_server_package_name": server_package_name
            })
    else:
        log_info("Upgrading Couchbase server on all nodes")
        status = ansible_runner.run_ansible_playbook(
            "upgrade-couchbase-server-package.yml",
            extra_vars={
                "couchbase_server_package_base_url": server_baseurl,
                "couchbase_server_package_name": server_package_name
            })

    if status != 0:
        raise ProvisioningError("Failed to install Couchbase Server")

    self.wait_for_ready_state()
def __init__(self, cluster_config, target):
    """Wrap a cluster node described by ``target`` (dict with "ip" and "name")."""
    self.ansible_runner = AnsibleRunner(cluster_config)
    self.ip = target["ip"]
    self.url = "http://{}:4985".format(target["ip"])
    self.hostname = target["name"]
    self.cluster_config = cluster_config

    # Couchbase Server admin endpoint: plain HTTP by default, TLS when the
    # cluster is configured for SSL.
    if is_cbs_ssl_enabled(self.cluster_config):
        self.server_scheme = "https"
        self.server_port = 18091
    else:
        self.server_scheme = "http"
        self.server_port = 8091
def start_sync_gateways(self, cluster_config, url=None, config=None):
    """
    Start sync gateways in a cluster. If url is passed, start the sync
    gateway at that url.

    :param cluster_config: path to the cluster configuration
    :param url: optional sync gateway url; when given only that node is started
    :param config: path to the sync gateway config to deploy (required)
    :raises ProvisioningError: if no config is given or the playbook fails
    """
    if config is None:
        raise ProvisioningError(
            "Starting a Sync Gateway requires a config")

    ansible_runner = AnsibleRunner(cluster_config)
    config_path = os.path.abspath(config)
    couchbase_server_primary_node = add_cbs_to_sg_config_server_field(
        cluster_config)

    # Switch the instance's server endpoint to the TLS admin port when SSL is on
    if is_cbs_ssl_enabled(cluster_config):
        self.server_port = 18091
        self.server_scheme = "https"

    playbook_vars = {
        "sync_gateway_config_filepath": config_path,
        "server_port": self.server_port,
        "server_scheme": self.server_scheme,
        "autoimport": "",
        "xattrs": "",
        "no_conflicts": "",
        "revs_limit": "",
        "couchbase_server_primary_node": couchbase_server_primary_node
    }

    # The vars below are spliced verbatim into the SG config template,
    # hence the trailing commas in the JSON fragments.
    if is_xattrs_enabled(cluster_config):
        playbook_vars["autoimport"] = '"import_docs": "continuous",'
        playbook_vars["xattrs"] = '"enable_shared_bucket_access": true,'

    if no_conflicts_enabled(cluster_config):
        playbook_vars["no_conflicts"] = '"allow_conflicts": false,'

    try:
        revs_limit = get_revs_limit(cluster_config)
        playbook_vars["revs_limit"] = '"revs_limit": {},'.format(revs_limit)
    except KeyError as ex:
        # BUG FIX: `ex.message` only exists on Python 2 exceptions and raises
        # AttributeError on Python 3 — format the exception object itself.
        log_info("Keyerror in getting revs_limit{}".format(ex))

    if url is not None:
        target = hostname_for_url(cluster_config, url)
        log_info("Starting {} sync_gateway.".format(target))
        status = ansible_runner.run_ansible_playbook(
            "start-sync-gateway.yml",
            extra_vars=playbook_vars,
            subset=target)
    else:
        log_info("Starting all sync_gateways.")
        status = ansible_runner.run_ansible_playbook(
            "start-sync-gateway.yml", extra_vars=playbook_vars)

    if status != 0:
        raise ProvisioningError("Could not start sync_gateway")
def collect_packet_capture(self, cluster_config, test_name):
    """Collect ngrep packet-capture output from the cluster nodes and archive
    it under /tmp as a timestamped zip named after the test."""
    runner = AnsibleRunner(config=cluster_config)
    if runner.run_ansible_playbook("collect-ngrep.yml") != 0:
        raise ProvisioningError("Failed to collect packet capture")

    # Zip logs with a timestamp, then drop the raw capture directory
    if os.path.isdir("/tmp/sys-logs"):
        timestamp = time.strftime("%Y-%m-%d-%H-%M-%S")
        archive_base = "/tmp/ngrep-{}-{}-output".format(test_name, timestamp)
        shutil.make_archive(archive_base, "zip", "/tmp/sys-logs")
        shutil.rmtree("/tmp/sys-logs")
        print("ngrep logs copied here {}.zip\n".format(archive_base))
def upgrade_sync_gateways(self, cluster_config, sg_conf, sync_gateway_version, url=None):
    """
    Upgrade sync gateways in a cluster. If url is passed, upgrade the
    sync gateway at that url
    """
    ansible_runner = AnsibleRunner(cluster_config)
    # Local import: avoids a circular dependency with the provision package
    # (presumably — the original also imported here; verify before moving).
    from libraries.provision.install_sync_gateway import SyncGatewayConfig

    version, build = version_and_build(sync_gateway_version)
    sg_config = SyncGatewayConfig(commit=None,
                                  version_number=version,
                                  build_number=build,
                                  config_path=sg_conf,
                                  build_flags="",
                                  skip_bucketcreation=False)
    sg_conf = os.path.abspath(sg_config.config_path)

    # Shared vars
    base_url, sg_package, accel_package = sg_config.sync_gateway_base_url_and_package()
    playbook_vars = {
        "couchbase_sync_gateway_package_base_url": base_url,
        "couchbase_sync_gateway_package": sg_package,
        "couchbase_sg_accel_package": accel_package,
    }

    if url is None:
        log_info("Upgrading all sync_gateways/sg_accels")
        status = ansible_runner.run_ansible_playbook(
            "upgrade-sg-sgaccel-package.yml", extra_vars=playbook_vars)
        log_info("Completed upgrading all sync_gateways/sg_accels")
    else:
        target = hostname_for_url(cluster_config, url)
        log_info("Upgrading sync_gateway/sg_accel on {} ...".format(target))
        status = ansible_runner.run_ansible_playbook(
            "upgrade-sg-sgaccel-package.yml",
            subset=target,
            extra_vars=playbook_vars)
        log_info("Completed upgrading {}".format(url))

    if status != 0:
        raise Exception("Could not upgrade sync_gateway/sg_accel")
def kill_gateload():
    """Kill any running gateload processes on the cluster via ansible.

    Exits the process with status 1 when CLUSTER_CONFIG is not set.
    A non-zero playbook status is only logged — gateload most likely
    already exited on its own.
    """
    cluster_config = os.environ.get("CLUSTER_CONFIG")
    if cluster_config is None:
        print(
            "Make sure CLUSTER_CONFIG is defined and pointing to the configuration you would like to provision"
        )
        sys.exit(1)

    print("Running perf test against cluster: {}".format(cluster_config))
    runner = AnsibleRunner(cluster_config)

    status = runner.run_ansible_playbook("kill-gateload.yml")
    if status != 0:
        print(
            "Killing gateload returned non-zero status: {}. Most likely it was no longer running"
            .format(status))
def restart_sync_gateways(self, cluster_config, url=None):
    """
    Restart sync gateways in a cluster. If url is passed, restart
    the sync gateway at that url
    """
    runner = AnsibleRunner(cluster_config)

    if url is None:
        log_info("Restarting all sync_gateways")
        result = runner.run_ansible_playbook("restart-sync-gateway.yml")
    else:
        target = hostname_for_url(cluster_config, url)
        log_info("Restarting sync_gateway on {} ...".format(target))
        result = runner.run_ansible_playbook(
            "restart-sync-gateway.yml", subset=target)

    if result != 0:
        raise ProvisioningError("Could not restart sync_gateway")
def enable_import_xattrs(self, cluster_config, sg_conf, url, enable_import=False):
    """Deploy an SG config with xattrs enabled

    Will also enable import if enable_import is set to True
    It is used to enable xattrs and import in the SG config"""
    runner = AnsibleRunner(cluster_config)

    # Server admin endpoint: TLS variant when the cluster uses SSL
    if is_cbs_ssl_enabled(cluster_config):
        server_scheme = "https"
        server_port = 18091
    else:
        server_scheme = "http"
        server_port = 8091

    # Shared vars — fragments are spliced into the SG config template
    playbook_vars = {
        "sync_gateway_config_filepath": sg_conf,
        "server_port": server_port,
        "server_scheme": server_scheme,
        "autoimport": "",
        "xattrs": ""
    }

    if is_xattrs_enabled(cluster_config):
        playbook_vars["xattrs"] = '"enable_shared_bucket_access": true,'
        if enable_import:
            playbook_vars["autoimport"] = '"import_docs": "continuous",'

    # Deploy config
    if url is None:
        log_info("Deploying config on all sync_gateways")
        status = runner.run_ansible_playbook(
            "deploy-sync-gateway-config.yml", extra_vars=playbook_vars)
    else:
        target = hostname_for_url(cluster_config, url)
        log_info("Deploying sync_gateway config on {} ...".format(target))
        status = runner.run_ansible_playbook(
            "deploy-sync-gateway-config.yml",
            subset=target,
            extra_vars=playbook_vars)

    if status != 0:
        raise Exception("Could not deploy config to sync_gateway")
def __init__(self, cluster_config, target):
    """Wrap a sync_gateway node described by ``target`` (dict with "ip" and "name")."""
    self.ansible_runner = AnsibleRunner(cluster_config)
    self.ip = target["ip"]
    self.url = "http://{}:4984".format(target["ip"])
    self.hostname = target["name"]
    self._headers = {'Content-Type': 'application/json'}
    self.admin = Admin(self)
    self.cluster_config = cluster_config

    # Couchbase Server admin endpoint: TLS variant when SSL is enabled
    if is_cbs_ssl_enabled(self.cluster_config):
        self.server_scheme = "https"
        self.server_port = 18091
    else:
        self.server_scheme = "http"
        self.server_port = 8091

    self.couchbase_server_primary_node = add_cbs_to_sg_config_server_field(
        self.cluster_config)
def fetch_sync_gateway_profile(cluster_config, folder_name):
    """Fetch the sync_gateway profile from the cluster and move it into the
    performance results folder, retrying up to 20 times with a 30s backoff.

    :param cluster_config: path to the cluster configuration
    :param folder_name: results subfolder the profile is moved into
    """
    max_num_attempts = 20
    for attempt_number in range(max_num_attempts):
        try:
            ansible_runner = AnsibleRunner(config=cluster_config)
            print("\n")
            print("Pulling sync_gateway profile. Attempt #{}".format(
                attempt_number))
            # fetch logs from sync_gateway instances
            status = ansible_runner.run_ansible_playbook(
                "fetch-sync-gateway-profile.yml")
            if status != 0:
                raise Exception("Failed to fetch sync_gateway profile")

            # Move perf logs to performance_results; done on success
            if os.path.isdir("/tmp/sync_gateway_profile"):
                shutil.move(
                    "/tmp/sync_gateway_profile",
                    "testsuites/syncgateway/performance/results/{}/".format(
                        folder_name))
                print("\n")
                return
        except Exception as e:
            # BUG FIX: the exception was passed to log_info as a stray second
            # positional argument; format it into the message instead.
            log_info("Exception trying to collect sync gateway profile: {}".format(e))
            if attempt_number < (max_num_attempts - 1):
                log_info("Going to retry. So far {}/{} attempts".format(
                    attempt_number, max_num_attempts))
                time.sleep(30)
            else:
                log_info("Exhausted attempts. Giving up")
        # (removed dead `attempt_number += 1` — the for loop owns the counter)
def fetch_machine_stats(cluster_config, folder_name):
    """Pull machine stats from the cluster and move them into the
    performance results folder for this run."""
    runner = AnsibleRunner(config=cluster_config)
    print("\n")
    print("Pulling logs")

    # fetch logs from sync_gateway instances
    fetch_status = runner.run_ansible_playbook("fetch-machine-stats.yml")
    assert fetch_status == 0, "Failed to fetch machine stats"

    # Move perf logs to performance_results, when the playbook produced them
    if os.path.isdir("/tmp/perf_logs"):
        shutil.move(
            "/tmp/perf_logs",
            "testsuites/syncgateway/performance/results/{}/".format(folder_name))
        print("\n")
def __init__(self, version_build, host, port, storage_engine):
    """Configure a Windows (.NET MSFT) LiteServ target: validate the required
    environment variables, write an ansible inventory for the host, and build
    the runner pointed at that inventory."""
    # Initialize baseclass properties
    super(LiteServNetMsft, self).__init__(version_build, host, port, storage_engine)

    # Both env vars are required to reach the Windows host over WinRM
    for env_var in ("LITESERV_MSFT_HOST_USER", "LITESERV_MSFT_HOST_PASSWORD"):
        if env_var not in os.environ:
            raise LiteServError(
                "Make sure you define '{}' as the windows user for the host you are targeting".format(env_var)
            )

    # Create config for LiteServ Windows host
    inventory_lines = [
        "[windows]",
        "win1 ansible_host={}".format(host),
        "[windows:vars]",
        "ansible_user={}".format(os.environ["LITESERV_MSFT_HOST_USER"]),
        "ansible_password={}".format(
            os.environ["LITESERV_MSFT_HOST_PASSWORD"]),
        "ansible_port=5986",
        "ansible_connection=winrm",
        "# The following is necessary for Python 2.7.9+ when using default WinRM self-signed certificates:",
        "ansible_winrm_server_cert_validation=ignore",
    ]
    inventory_text = "\n".join(inventory_lines)
    log_info("Writing: {}".format(inventory_text))

    config_location = "resources/liteserv_configs/net-msft"
    with open(config_location, "w") as f:
        f.write(inventory_text)

    self.ansible_runner = AnsibleRunner(config=config_location)
def run_sgload_perf_test(cluster_config, sgload_arg_list_main, skip_build_sgload):
    """Run the sgload perf test: start profiling, install telegraf, optionally
    build sgload, then launch it on a single load generator against the first
    sync gateway host."""
    print(
        "Running sgload perf test against cluster: {}".format(cluster_config))
    runner = AnsibleRunner(cluster_config)

    print(">>> Starting profile collection scripts")
    profile_status = runner.run_ansible_playbook(
        "start-profile-collection.yml",
        extra_vars={
            "stats_run_time": 3600,
            "delay_profiling_secs": 60
        },
    )
    assert profile_status == 0, "Could not start profiling collection scripts"

    # Install + configure telegraf
    if runner.run_ansible_playbook("install-telegraf.yml") != 0:
        raise ProvisioningError("Failed to install telegraf")

    # build_sgload (ansible)
    if not skip_build_sgload:
        build_sgload(runner)

    # get load generator and sg hostnames
    lg_hosts = get_load_generators_hosts(cluster_config)
    sg_hosts = get_sync_gateways_hosts(cluster_config)

    # Drive the load from one generator against the first SG host
    run_sgload_on_single_loadgenerator(lg_hosts, sgload_arg_list_main, sg_hosts[0])

    log_info("Finished")
def run_gateload_perf_test(gen_gateload_config, test_id, gateload_params,
                           delay_profiling_secs, delay_expvar_collect_secs):
    """End-to-end gateload perf run: install telegraf, build gateload,
    optionally generate its config, start profiling, run gateload, collect
    expvars and the sync_gateway profile, then shut gateload down.

    Requires CLUSTER_CONFIG in the environment; exits with status 1 otherwise.
    Raises ProvisioningError / RuntimeError / AssertionError on failures.
    """
    try:
        cluster_config = os.environ["CLUSTER_CONFIG"]
    except KeyError:
        print ("Make sure CLUSTER_CONFIG is defined and pointing to the configuration you would like to provision")
        sys.exit(1)

    print("Running perf test against cluster: {}".format(cluster_config))
    ansible_runner = AnsibleRunner(cluster_config)

    # Install + configure telegraf
    status = ansible_runner.run_ansible_playbook("install-telegraf.yml")
    if status != 0:
        raise ProvisioningError("Failed to install telegraf")

    # Unique run id: test id + timestamp
    test_run_id = "{}_{}".format(test_id, time.strftime("%Y-%m-%d-%H-%M-%S"))

    # Create test results directory
    os.makedirs("testsuites/syncgateway/performance/results/{}".format(test_run_id))

    # Copy provisioning_config to performance_results/ folder
    shutil.copy("{}".format(cluster_config), "testsuites/syncgateway/performance/results/{}".format(test_run_id))

    # Pullers need a generated gateload config to be meaningful
    if int(gateload_params.number_pullers) > 0 and not gen_gateload_config:
        raise Exception("You specified --num-pullers but did not set --gen-gateload-config")

    # Build gateload
    print(">>> Building gateload")
    status = ansible_runner.run_ansible_playbook(
        "build-gateload.yml",
        extra_vars={},
    )
    assert status == 0, "Could not build gateload"

    # Generate gateload config
    print(">>> Generate gateload configs")
    if gen_gateload_config:
        generate_gateload_configs.main(
            cluster_config=cluster_config,
            test_id=test_run_id,
            gateload_params=gateload_params
        )

    print(">>> Starting profile collection scripts")
    # Profiling window matches the scenario runtime (ms -> s)
    runtime_s = int(gateload_params.runtime_ms) // 1000
    status = ansible_runner.run_ansible_playbook(
        "start-profile-collection.yml",
        extra_vars={
            "stats_run_time": runtime_s,
            "delay_profiling_secs": int(delay_profiling_secs)
        },
    )
    assert status == 0, "Could not start profiling collection scripts"

    # Start gateload
    print(">>> Starting gateload with {0} pullers and {1} pushers".format(
        gateload_params.number_pullers, gateload_params.number_pushers
    ))

    status = ansible_runner.run_ansible_playbook(
        "start-gateload.yml",
        extra_vars={
            "delay_expvar_collect_secs": int(delay_expvar_collect_secs)
        },
    )
    assert status == 0, "Could not start gateload"

    # write expvars to file, will exit when gateload scenario is done
    print(">>> Logging expvars")
    gateload_finished_successfully = log_expvars(cluster_config, test_run_id)

    print(">>> Fetch Sync Gateway profile")
    fetch_sync_gateway_profile(cluster_config, test_run_id)

    print(">>> Shutdown gateload")
    kill_gateload()

    # Surface scenario failure only after profile collection and shutdown
    if not gateload_finished_successfully:
        raise RuntimeError("It appears that gateload did not finish successfully. Check logs for details")
def restart_services(self):
    """Restart the cluster services via the restart-services playbook."""
    runner = AnsibleRunner(self._cluster_config)
    result = runner.run_ansible_playbook("restart-services.yml", extra_vars={})
    assert result == 0, "Failed to restart services"
def reset(self, sg_config_path):
    """Reset the cluster to a clean state and redeploy sync_gateway.

    Stops sync_gateway / sg_accel, deletes their artifacts, recreates the
    server buckets from ``sg_config_path``, then starts sync_gateway (and
    sg_accel in "di" mode) with the rendered config.

    :param sg_config_path: path to the sync gateway config to deploy
    :returns: the mode parsed from the config ("di" or channel cache)
    :raises ProvisioningError: when the cluster/config combination is invalid
    """
    ansible_runner = AnsibleRunner(self._cluster_config)

    log_info(">>> Reseting cluster ...")
    log_info(">>> CBS SSL enabled: {}".format(self.cbs_ssl))
    log_info(">>> Using xattrs: {}".format(self.xattrs))

    # Stop sync_gateways
    log_info(">>> Stopping sync_gateway")
    status = ansible_runner.run_ansible_playbook("stop-sync-gateway.yml")
    assert status == 0, "Failed to stop sync gateway"

    # Stop sync_gateway accels
    log_info(">>> Stopping sg_accel")
    status = ansible_runner.run_ansible_playbook("stop-sg-accel.yml")
    assert status == 0, "Failed to stop sg_accel"

    # Deleting sync_gateway artifacts
    log_info(">>> Deleting sync_gateway artifacts")
    status = ansible_runner.run_ansible_playbook(
        "delete-sync-gateway-artifacts.yml")
    assert status == 0, "Failed to delete sync_gateway artifacts"

    # Deleting sg_accel artifacts
    log_info(">>> Deleting sg_accel artifacts")
    status = ansible_runner.run_ansible_playbook(
        "delete-sg-accel-artifacts.yml")
    assert status == 0, "Failed to delete sg_accel artifacts"

    # Delete buckets on the primary server
    log_info(">>> Deleting buckets on: {}".format(self.servers[0].url))
    self.servers[0].delete_buckets()

    # Parse config and grab bucket names
    config_path_full = os.path.abspath(sg_config_path)
    config = Config(config_path_full)
    mode = config.get_mode()
    bucket_name_set = config.get_bucket_name_set()

    self.sync_gateway_config = config

    is_valid, reason = validate_cluster(self.sync_gateways, self.sg_accels,
                                        config)
    if not is_valid:
        raise ProvisioningError(reason)

    log_info(">>> Creating buckets on: {}".format(self.servers[0].url))
    log_info(">>> Creating buckets {}".format(bucket_name_set))
    self.servers[0].create_buckets(bucket_name_set)

    # Wait for server to be in a warmup state to work around
    # https://github.com/couchbase/sync_gateway/issues/1745
    log_info(">>> Waiting for Server: {} to be in a healthy state".format(
        self.servers[0].url))
    self.servers[0].wait_for_ready_state()

    log_info(">>> Starting sync_gateway with configuration: {}".format(
        config_path_full))

    server_port = 8091
    server_scheme = "http"
    couchbase_server_primary_node = add_cbs_to_sg_config_server_field(
        self._cluster_config)

    if self.cbs_ssl:
        server_port = 18091
        server_scheme = "https"

    # Start sync-gateway; fragments below are spliced into the config template
    playbook_vars = {
        "sync_gateway_config_filepath": config_path_full,
        "server_port": server_port,
        "server_scheme": server_scheme,
        "autoimport": "",
        "xattrs": "",
        "no_conflicts": "",
        "revs_limit": "",
        "couchbase_server_primary_node": couchbase_server_primary_node
    }

    # Add configuration to run with xattrs
    if self.xattrs:
        playbook_vars["autoimport"] = '"import_docs": "continuous",'
        playbook_vars["xattrs"] = '"enable_shared_bucket_access": true,'

    if no_conflicts_enabled(self._cluster_config):
        playbook_vars["no_conflicts"] = '"allow_conflicts": false,'

    try:
        revs_limit = get_revs_limit(self._cluster_config)
        playbook_vars["revs_limit"] = '"revs_limit": {},'.format(revs_limit)
    except KeyError as ex:
        # BUG FIX: `ex.message` is Python 2 only and raises AttributeError on
        # Python 3 — format the exception object itself.
        log_info("Keyerror in getting revs_limit{}".format(ex))

    status = ansible_runner.run_ansible_playbook("start-sync-gateway.yml",
                                                 extra_vars=playbook_vars)
    assert status == 0, "Failed to start to Sync Gateway"

    # HACK - only enable sg_accel for distributed index tests
    # revise this with https://github.com/couchbaselabs/sync-gateway-testcluster/issues/222
    if mode == "di":
        # Start sg-accel
        status = ansible_runner.run_ansible_playbook(
            "start-sg-accel.yml", extra_vars=playbook_vars)
        assert status == 0, "Failed to start sg_accel"

    # Validate CBGT
    if mode == "di":
        if not self.validate_cbgt_pindex_distribution_retry(
                len(self.sg_accels)):
            self.save_cbgt_diagnostics()
            raise Exception("Failed to validate CBGT Pindex distribution")
        log_info(">>> Detected valid CBGT Pindex distribution")
    else:
        log_info(">>> Running in channel cache")

    return mode
def params_from_base_test_setup(request, params_from_base_suite_setup):
    """Per-test pytest yield-fixture for the upgrade suite: starts LiteServ,
    resolves cluster urls, persists the xattrs setting, yields the test
    parameter dict, then tears down LiteServ, verifies the cluster, fetches
    and scans logs.
    """
    # Code before the yield will execute before each test starts

    # pytest command line parameters
    cluster_config = params_from_base_suite_setup["cluster_config"]
    cluster_topology = params_from_base_suite_setup["cluster_topology"]
    mode = params_from_base_suite_setup["mode"]
    xattrs_enabled = params_from_base_suite_setup["xattrs_enabled"]
    server_version = params_from_base_suite_setup["server_version"]
    sync_gateway_version = params_from_base_suite_setup["sync_gateway_version"]
    server_upgraded_version = params_from_base_suite_setup[
        "server_upgraded_version"]
    sync_gateway_upgraded_version = params_from_base_suite_setup[
        "sync_gateway_upgraded_version"]
    liteserv_host = params_from_base_suite_setup["liteserv_host"]
    liteserv_port = params_from_base_suite_setup["liteserv_port"]
    liteserv_version = params_from_base_suite_setup["liteserv_version"]
    liteserv_platform = params_from_base_suite_setup["liteserv_platform"]
    liteserv_storage_engine = params_from_base_suite_setup[
        "liteserv_storage_engine"]
    liteserv = params_from_base_suite_setup["liteserv"]
    num_docs = params_from_base_suite_setup["num_docs"]
    cbs_platform = params_from_base_suite_setup["cbs_platform"]
    cbs_toy_build = params_from_base_suite_setup["cbs_toy_build"]
    test_name = request.node.name

    log_info("Running test '{}'".format(test_name))
    log_info("cluster_config: {}".format(cluster_config))
    log_info("cluster_topology: {}".format(cluster_topology))
    log_info("mode: {}".format(mode))
    log_info("xattrs_enabled: {}".format(xattrs_enabled))

    client = MobileRestClient()

    # Start LiteServ and delete any databases
    ls_url = liteserv.start("{}/logs/{}-{}-{}.txt".format(
        RESULTS_DIR,
        type(liteserv).__name__, test_name, datetime.datetime.now()))
    client.delete_databases(ls_url)

    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(
        cluster_config=cluster_config)

    # First sync gateway's public and admin urls
    sg_url = cluster_hosts["sync_gateways"][0]["public"]
    sg_admin_url = cluster_hosts["sync_gateways"][0]["admin"]

    # Persist the xattrs choice into the cluster config environment
    if xattrs_enabled:
        log_info("Running upgrade with xattrs for sync meta storage")
        persist_cluster_config_environment_prop(cluster_config,
                                                'xattrs_enabled', True)
    else:
        log_info("Using document storage for sync meta data")
        persist_cluster_config_environment_prop(cluster_config,
                                                'xattrs_enabled', False)

    # This dictionary is passed to each test
    yield {
        "cluster_config": cluster_config,
        "cluster_topology": cluster_topology,
        "mode": mode,
        "xattrs_enabled": xattrs_enabled,
        "server_version": server_version,
        "sync_gateway_version": sync_gateway_version,
        "server_upgraded_version": server_upgraded_version,
        "sync_gateway_upgraded_version": sync_gateway_upgraded_version,
        "liteserv_host": liteserv_host,
        "liteserv_port": liteserv_port,
        "liteserv_version": liteserv_version,
        "liteserv_platform": liteserv_platform,
        "liteserv_storage_engine": liteserv_storage_engine,
        "ls_url": ls_url,
        "sg_url": sg_url,
        "sg_admin_url": sg_admin_url,
        "num_docs": num_docs,
        "cbs_platform": cbs_platform,
        "cbs_toy_build": cbs_toy_build
    }

    client.delete_databases(ls_url)
    liteserv.stop()

    # Code after the yield will execute when each test finishes
    log_info("Tearing down test '{}'".format(test_name))

    network_utils = NetworkUtils()
    network_utils.list_connections()

    # Verify all sync_gateways and sg_accels are reachable
    c = cluster.Cluster(cluster_config)
    errors = c.verify_alive(mode)

    # Fetch logs
    logging_helper = Logging()
    logging_helper.fetch_and_analyze_logs(cluster_config=cluster_config,
                                          test_name=test_name)

    assert len(errors) == 0

    # Scan logs
    # SG logs for panic, data race
    # System logs for OOM
    ansible_runner = AnsibleRunner(cluster_config)
    script_name = "{}/utilities/check_logs.sh".format(os.getcwd())
    status = ansible_runner.run_ansible_playbook(
        "check-logs.yml", extra_vars={"script_name": script_name})

    if status != 0:
        raise LogScanningError("Errors found in the logs")
def params_from_base_test_setup(request, params_from_base_suite_setup):
    """Per-test pytest yield-fixture for the perf suite: yields the test
    parameter dict, then verifies the cluster, conditionally pulls logs
    (on --collect-logs, test failure, or dead nodes) and scans them.
    """
    # Code before the yield will execute before each test starts

    # pytest command line parameters
    collect_logs = request.config.getoption("--collect-logs")
    cluster_config = params_from_base_suite_setup["cluster_config"]
    cluster_topology = params_from_base_suite_setup["cluster_topology"]
    mode = params_from_base_suite_setup["mode"]
    xattrs_enabled = params_from_base_suite_setup["xattrs_enabled"]
    test_name = request.node.name

    log_info("Running test '{}'".format(test_name))
    log_info("cluster_config: {}".format(cluster_config))
    log_info("cluster_topology: {}".format(cluster_topology))
    log_info("mode: {}".format(mode))
    log_info("xattrs_enabled: {}".format(xattrs_enabled))

    # This dictionary is passed to each test
    yield {
        "cluster_config": cluster_config,
        "cluster_topology": cluster_topology,
        "mode": mode,
        "xattrs_enabled": xattrs_enabled,
        "server_seed_docs": params_from_base_suite_setup["server_seed_docs"],
        "max_docs": params_from_base_suite_setup["max_docs"],
        "num_users": params_from_base_suite_setup["num_users"],
        "create_batch_size":
        params_from_base_suite_setup["create_batch_size"],
        "create_delay": params_from_base_suite_setup["create_delay"],
        "update_runtime_sec":
        params_from_base_suite_setup["update_runtime_sec"],
        "update_batch_size":
        params_from_base_suite_setup["update_batch_size"],
        "update_docs_percentage":
        params_from_base_suite_setup["update_docs_percentage"],
        "update_delay": params_from_base_suite_setup["update_delay"],
        "changes_delay": params_from_base_suite_setup["changes_delay"],
        "changes_limit": params_from_base_suite_setup["changes_limit"]
    }

    # Code after the yield will execute when each test finishes
    log_info("Tearing down test '{}'".format(test_name))

    network_utils = NetworkUtils()
    network_utils.list_connections()

    # Verify all sync_gateways and sg_accels are reachable
    c = cluster.Cluster(cluster_config)
    errors = c.verify_alive(mode)

    # if the test failed or a node is down, pull logs
    # NOTE: request.node.rep_call presumably comes from a makereport hook
    # elsewhere in the suite — verify before relying on it.
    logging_helper = Logging()
    if collect_logs or request.node.rep_call.failed or len(errors) != 0:
        logging_helper.fetch_and_analyze_logs(cluster_config=cluster_config,
                                              test_name=test_name)

    assert len(errors) == 0

    # Scan logs
    # SG logs for panic, data race
    # System logs for OOM
    ansible_runner = AnsibleRunner(cluster_config)
    script_name = "{}/utilities/check_logs.sh".format(os.getcwd())
    status = ansible_runner.run_ansible_playbook(
        "check-logs.yml", extra_vars={"script_name": script_name})

    if status != 0:
        # Pull logs so the failure can be diagnosed, then fail the teardown
        logging_helper.fetch_and_analyze_logs(cluster_config=cluster_config,
                                              test_name=test_name)
        raise LogScanningError("Errors found in the logs")
def install_sync_gateway(cluster_config, sync_gateway_config, sg_ce=False, sg_platform="centos", sa_platform="centos"):
    """Install sync_gateway (and sg_accel) on the cluster, from source when a
    commit is specified, otherwise from a package, then configure the aws
    cloudwatch logs forwarder.

    :param cluster_config: path to the cluster configuration
    :param sync_gateway_config: SyncGatewayConfig describing what to install
    :param sg_ce: install the community edition package when True
    :param sg_platform: sync_gateway target platform ("centos" or "windows")
    :param sa_platform: sg_accel target platform ("centos" or "windows")
    :raises ProvisioningError: when any install/configure playbook fails
    """
    log_info(sync_gateway_config)

    if sync_gateway_config.build_flags != "":
        log_warn(
            "\n\n!!! WARNING: You are building with flags: {} !!!\n\n".format(
                sync_gateway_config.build_flags))

    ansible_runner = AnsibleRunner(cluster_config)
    config_path = os.path.abspath(sync_gateway_config.config_path)
    couchbase_server_primary_node = add_cbs_to_sg_config_server_field(
        cluster_config)

    # Create buckets unless the user explicitly asked to skip this step
    if not sync_gateway_config.skip_bucketcreation:
        create_server_buckets(cluster_config, sync_gateway_config)

    server_port = 8091
    server_scheme = "http"

    if is_cbs_ssl_enabled(cluster_config):
        server_port = 18091
        server_scheme = "https"

    # Shared vars; fragments below are spliced into the SG config template
    playbook_vars = {
        "sync_gateway_config_filepath": config_path,
        "server_port": server_port,
        "server_scheme": server_scheme,
        "autoimport": "",
        "xattrs": "",
        "no_conflicts": "",
        "couchbase_server_primary_node": couchbase_server_primary_node
    }

    if is_xattrs_enabled(cluster_config):
        playbook_vars["autoimport"] = '"import_docs": "continuous",'
        playbook_vars["xattrs"] = '"enable_shared_bucket_access": true,'

    if no_conflicts_enabled(cluster_config):
        playbook_vars["no_conflicts"] = '"allow_conflicts": false,'

    try:
        revs_limit = get_revs_limit(cluster_config)
        playbook_vars["revs_limit"] = '"revs_limit": {},'.format(revs_limit)
    except KeyError as ex:
        # BUG FIX: `ex.message` is Python 2 only and raises AttributeError on
        # Python 3 — format the exception object itself.
        log_info("Keyerror in getting revs_limit{}".format(ex))

    # Install Sync Gateway via Source or Package
    if sync_gateway_config.commit is not None:
        # Install from source
        playbook_vars["commit"] = sync_gateway_config.commit
        playbook_vars["build_flags"] = sync_gateway_config.build_flags

        status = ansible_runner.run_ansible_playbook(
            "install-sync-gateway-source.yml", extra_vars=playbook_vars)
        if status != 0:
            raise ProvisioningError("Failed to install sync_gateway source")
    else:
        # Install from Package
        sync_gateway_base_url, sync_gateway_package_name, sg_accel_package_name = sync_gateway_config.sync_gateway_base_url_and_package(
            sg_ce=sg_ce, sg_platform=sg_platform, sa_platform=sa_platform)

        playbook_vars[
            "couchbase_sync_gateway_package_base_url"] = sync_gateway_base_url
        playbook_vars[
            "couchbase_sync_gateway_package"] = sync_gateway_package_name
        playbook_vars["couchbase_sg_accel_package"] = sg_accel_package_name

        if sg_platform == "windows":
            status = ansible_runner.run_ansible_playbook(
                "install-sync-gateway-package-windows.yml",
                extra_vars=playbook_vars)
        else:
            status = ansible_runner.run_ansible_playbook(
                "install-sync-gateway-package.yml", extra_vars=playbook_vars)
        if status != 0:
            raise ProvisioningError("Failed to install sync_gateway package")

        if sa_platform == "windows":
            status = ansible_runner.run_ansible_playbook(
                "install-sg-accel-package-windows.yml",
                extra_vars=playbook_vars)
        else:
            status = ansible_runner.run_ansible_playbook(
                "install-sg-accel-package.yml", extra_vars=playbook_vars)
        if status != 0:
            raise ProvisioningError("Failed to install sg_accel package")

    # Configure aws cloudwatch logs forwarder
    status = ansible_runner.run_ansible_playbook(
        "configure-sync-gateway-awslogs-forwarder.yml", extra_vars={})
    if status != 0:
        raise ProvisioningError(
            "Failed to configure sync_gateway awslogs forwarder")
def stop_packet_capture(self, cluster_config):
    """Stop the ngrep packet capture on the cluster nodes."""
    runner = AnsibleRunner(config=cluster_config)
    if runner.run_ansible_playbook("stop-ngrep.yml") != 0:
        raise ProvisioningError("Failed to stop packet capture")