def install_couchbase_server(cluster_config,
                             couchbase_server_config,
                             cbs_platform="centos7"):
    """Install Couchbase Server on the cluster's first server node.

    Resolves the package base URL / name for the requested platform, runs
    the install playbook, and blocks until the node reports 'healthy'.

    Raises:
        ProvisioningError: if the install playbook returns non-zero.
    """
    log_info(cluster_config)
    log_info(couchbase_server_config)

    ansible_runner = AnsibleRunner(cluster_config)
    cluster_keywords = ClusterKeywords()
    cluster_topology = cluster_keywords.get_cluster_topology(cluster_config)
    server_url = cluster_topology["couchbase_servers"][0]
    cb_server = CouchbaseServer(server_url)

    log_info(">>> Installing Couchbase Server")
    # Install Server
    server_baseurl, server_package_name = couchbase_server_config.get_baseurl_package(
        cb_server, cbs_platform)
    status = ansible_runner.run_ansible_playbook(
        "install-couchbase-server-package.yml",
        extra_vars={
            "couchbase_server_package_base_url": server_baseurl,
            "couchbase_server_package_name": server_package_name
        })
    if status != 0:
        raise ProvisioningError("Failed to install Couchbase Server")

    # Wait for server to be in 'healthy state'.
    # Use log_info (not print) for consistency with the rest of this module.
    log_info(">>> Waiting for server to be in 'healthy' state")
    cb_server.wait_for_ready_state()
def fetch_sync_gateway_logs(cluster_config, prefix):
    """Pull sync_gateway / sg_accel logs from the cluster, zip them with a
    timestamp, copy the archive into the results directory, and return the
    path of the copied zip.

    Raises:
        CollectionError: if the playbook fails or no logs were pulled.
    """
    runner = AnsibleRunner(cluster_config)

    log_info("Pulling sync_gateway / sg_accel logs")
    # Pull the logs from every sync_gateway / sg_accel instance
    if runner.run_ansible_playbook("fetch-sync-gateway-logs.yml") != 0:
        raise CollectionError("Could not pull logs")

    if not os.path.isdir("/tmp/sg_logs"):
        raise CollectionError("Error finding pulled logs at /tmp/sg_logs")

    # Archive the pulled logs under a timestamped name, then drop the raw dir
    date_time = time.strftime("%Y-%m-%d-%H-%M-%S")
    temp_log_path = "/tmp/{}-{}-sglogs".format(prefix, date_time)
    shutil.make_archive(temp_log_path, "zip", "/tmp/sg_logs")
    shutil.rmtree("/tmp/sg_logs")

    # Copy the archive into the results directory
    zip_file_path = "{}.zip".format(temp_log_path)
    log_results_location = "{}/logs".format(RESULTS_DIR)
    shutil.copy(zip_file_path, log_results_location)

    zip_name = "{}-{}-sglogs.zip".format(prefix, date_time)
    result_zip = "{}/{}".format(log_results_location, zip_name)
    log_info("sync_gateway logs copied to {}".format(result_zip))

    return result_zip
class SgAccel:
    """Thin wrapper around one sg_accel node, driven via ansible playbooks."""

    def __init__(self, cluster_config, target):
        self.ansible_runner = AnsibleRunner(cluster_config)
        self.ip = target["ip"]
        self.url = "http://{}:4985".format(target["ip"])
        self.hostname = target["name"]

    def info(self):
        """Return the body of the node's root endpoint (raises on HTTP error)."""
        response = requests.get(self.url)
        response.raise_for_status()
        return response.text

    def stop(self):
        """Stop the sg_accel service on this host; return the playbook status."""
        return self.ansible_runner.run_ansible_playbook(
            "stop-sg-accel.yml", subset=self.hostname)

    def start(self, config):
        """Start sg_accel with the given config file; return the playbook status."""
        conf_path = os.path.abspath(config)
        log.info(">>> Starting sg_accel with configuration: {}".format(conf_path))
        return self.ansible_runner.run_ansible_playbook(
            "start-sg-accel.yml",
            extra_vars={"sync_gateway_config_filepath": conf_path},
            subset=self.hostname,
        )

    def __repr__(self):
        return "SgAccel: {}:{}\n".format(self.hostname, self.ip)
def fetch_sync_gateway_logs(cluster_config, prefix):
    """Collect sync_gateway / sg_accel logs via ansible and archive them.

    The pulled logs (expected under /tmp/sg_logs) are zipped with a
    timestamp, copied into the results dir, and the final zip path returned.

    Raises:
        CollectionError: on playbook failure or missing /tmp/sg_logs.
    """
    ansible_runner = AnsibleRunner(cluster_config)

    log_info("Pulling sync_gateway / sg_accel logs")
    # fetch logs from sync_gateway instances
    status = ansible_runner.run_ansible_playbook("fetch-sync-gateway-logs.yml")
    if status != 0:
        raise CollectionError("Could not pull logs")

    # zip logs and timestamp
    if os.path.isdir("/tmp/sg_logs"):
        stamp = time.strftime("%Y-%m-%d-%H-%M-%S")
        archive_base = "/tmp/{}-{}-sglogs".format(prefix, stamp)
        shutil.make_archive(archive_base, "zip", "/tmp/sg_logs")
        shutil.rmtree("/tmp/sg_logs")

        # Copy logs to results dir
        zip_file_path = "{}.zip".format(archive_base)
        log_results_location = "{}/logs".format(RESULTS_DIR)
        shutil.copy(zip_file_path, log_results_location)

        zip_name = "{}-{}-sglogs.zip".format(prefix, stamp)
        result_zip = "{}/{}".format(log_results_location, zip_name)
        log_info("sync_gateway logs copied to {}".format(result_zip))
        return result_zip

    raise CollectionError("Error finding pulled logs at /tmp/sg_logs")
def push_cbcollect_info_supportal(cluster_config):
    """
    1. Runs cbcollect_info on one of the couchbase server nodes
    2. Pushes to supportal.couchbase.com

    Raises:
        AssertionError: if the playbook returns non-zero.
    """
    ansible_runner = AnsibleRunner(config=cluster_config)
    status = ansible_runner.run_ansible_playbook("push-cbcollect-info-supportal.yml")
    # Raise explicitly instead of using `assert`, which is stripped under -O;
    # AssertionError is kept so callers see the same exception type.
    if status != 0:
        raise AssertionError("Failed to push cbcollect info")
def push_cbcollect_info_supportal(cluster_config):
    """
    1. Runs cbcollect_info on one of the couchbase server nodes
    2. Pushes to supportal.couchbase.com

    Raises:
        AssertionError: if the playbook returns non-zero.
    """
    ansible_runner = AnsibleRunner(config=cluster_config)
    status = ansible_runner.run_ansible_playbook(
        "push-cbcollect-info-supportal.yml")
    # `assert` is stripped when Python runs with -O, so raise explicitly
    # (AssertionError preserved so callers see the same exception type)
    if status != 0:
        raise AssertionError("Failed to push cbcollect info")
    def upgrade_server(self,
                       cluster_config,
                       server_version_build,
                       cbs_platform,
                       target=None,
                       toy_build=None):
        """Upgrade Couchbase Server on one node (`target`) or on all nodes.

        `server_version_build` is "<version>" or "<version>-<build>".
        `toy_build`, if given, is a full package URL and takes precedence.

        Raises:
            ProvisioningError: if the upgrade playbook fails.
        """
        ansible_runner = AnsibleRunner(cluster_config)

        log_info(">>> Upgrading Couchbase Server")
        # Install Server
        if toy_build:
            # e.g. http://server.jenkins.couchbase.com/view/All/job/watson-toy/1770/artifact/couchbase-server-enterprise-5.0.0-9900-centos7.x86_64.rpm
            # Everything before the last '/' is the base URL; the rest is the
            # package name (replaces the manual split/join; fixed typo
            # 'server_verion' below as well).
            server_baseurl, server_package_name = toy_build.rsplit('/', 1)
        else:
            version_build = server_version_build.split("-")
            server_version = version_build[0]
            # Build number is included when the string is "<version>-<build>"
            server_build = version_build[1] if len(version_build) == 2 else None

            if server_build is None:
                server_baseurl, server_package_name = self.resolve_cb_mobile_url(
                    server_version, cbs_platform)
            else:
                server_baseurl, server_package_name = self.resolve_cb_nas_url(
                    server_version, server_build, cbs_platform)

        extra_vars = {
            "couchbase_server_package_base_url": server_baseurl,
            "couchbase_server_package_name": server_package_name
        }
        if target is not None:
            target = hostname_for_url(cluster_config, target)
            log_info("Upgrading Couchbase server on {} ...".format(target))
            status = ansible_runner.run_ansible_playbook(
                "upgrade-couchbase-server-package.yml",
                subset=target,
                extra_vars=extra_vars)
        else:
            log_info("Upgrading Couchbase server on all nodes")
            status = ansible_runner.run_ansible_playbook(
                "upgrade-couchbase-server-package.yml",
                extra_vars=extra_vars)

        if status != 0:
            raise ProvisioningError("Failed to install Couchbase Server")

        self.wait_for_ready_state()
 def stop_sync_gateway(self, cluster_config, url):
     """Stop sync_gateway on the node serving `url`.

     Raises:
         ProvisioningError: if the stop playbook fails.
     """
     host = hostname_for_url(cluster_config, url)
     log_info("Shutting down sync_gateway on {} ...".format(host))
     runner = AnsibleRunner(cluster_config)
     if runner.run_ansible_playbook("stop-sync-gateway.yml", subset=host) != 0:
         raise ProvisioningError("Could not stop sync_gateway")
    def start_sync_gateways(self, cluster_config, url=None, config=None):
        """ Start sync gateways in a cluster. If url is passed,
        start the sync gateway at that url

        Raises:
            ProvisioningError: if no config is given or the playbook fails.
        """

        if config is None:
            raise ProvisioningError(
                "Starting a Sync Gateway requires a config")

        ansible_runner = AnsibleRunner(cluster_config)
        config_path = os.path.abspath(config)
        couchbase_server_primary_node = add_cbs_to_sg_config_server_field(
            cluster_config)
        # NOTE: this mutates instance state, so later calls also see the
        # SSL port/scheme once SSL is detected
        if is_cbs_ssl_enabled(cluster_config):
            self.server_port = 18091
            self.server_scheme = "https"

        playbook_vars = {
            "sync_gateway_config_filepath": config_path,
            "server_port": self.server_port,
            "server_scheme": self.server_scheme,
            "autoimport": "",
            "xattrs": "",
            "no_conflicts": "",
            "revs_limit": "",
            "couchbase_server_primary_node": couchbase_server_primary_node
        }

        if is_xattrs_enabled(cluster_config):
            playbook_vars["autoimport"] = '"import_docs": "continuous",'
            playbook_vars["xattrs"] = '"enable_shared_bucket_access": true,'

        if no_conflicts_enabled(cluster_config):
            playbook_vars["no_conflicts"] = '"allow_conflicts": false,'
        try:
            revs_limit = get_revs_limit(cluster_config)
            playbook_vars["revs_limit"] = '"revs_limit": {},'.format(
                revs_limit)
        except KeyError as ex:
            # `ex.message` only exists on Python 2 exceptions; str(ex) works
            # on both Python 2 and 3 (fixes AttributeError on py3)
            log_info("Keyerror in getting revs_limit{}".format(str(ex)))
        if url is not None:
            target = hostname_for_url(cluster_config, url)
            log_info("Starting {} sync_gateway.".format(target))
            status = ansible_runner.run_ansible_playbook(
                "start-sync-gateway.yml",
                extra_vars=playbook_vars,
                subset=target)
        else:
            log_info("Starting all sync_gateways.")
            status = ansible_runner.run_ansible_playbook(
                "start-sync-gateway.yml", extra_vars=playbook_vars)
        if status != 0:
            raise ProvisioningError("Could not start sync_gateway")
# --- Beispiel #10 (0) --- scraper example marker, commented out so the file parses
    def __init__(self, cluster_config, target):
        """Bind this wrapper to one node and select the server port/scheme."""
        self.ansible_runner = AnsibleRunner(cluster_config)
        node_ip = target["ip"]
        self.ip = node_ip
        self.url = "http://{}:4985".format(node_ip)
        self.hostname = target["name"]
        self.cluster_config = cluster_config
        # Plain-HTTP admin port by default; use the SSL port when enabled
        if is_cbs_ssl_enabled(self.cluster_config):
            self.server_port = 18091
            self.server_scheme = "https"
        else:
            self.server_port = 8091
            self.server_scheme = "http"
    def collect_packet_capture(self, cluster_config, test_name):
        """Run the ngrep collection playbook and zip any captured output."""
        runner = AnsibleRunner(config=cluster_config)
        if runner.run_ansible_playbook("collect-ngrep.yml") != 0:
            raise ProvisioningError("Failed to collect packet capture")

        # Archive the captured output with a timestamp, then drop the raw dir
        if os.path.isdir("/tmp/sys-logs"):
            stamp = time.strftime("%Y-%m-%d-%H-%M-%S")
            archive_base = "/tmp/ngrep-{}-{}-output".format(test_name, stamp)
            shutil.make_archive(archive_base, "zip", "/tmp/sys-logs")
            shutil.rmtree("/tmp/sys-logs")
            print("ngrep logs copied here {}.zip\n".format(archive_base))
    def collect_packet_capture(self, cluster_config, test_name):
        """Collect ngrep packet captures from the cluster and archive them."""
        ansible_runner = AnsibleRunner(config=cluster_config)
        status = ansible_runner.run_ansible_playbook("collect-ngrep.yml")
        if status != 0:
            raise ProvisioningError("Failed to collect packet capture")

        # zip logs and timestamp
        if not os.path.isdir("/tmp/sys-logs"):
            return
        date_time = time.strftime("%Y-%m-%d-%H-%M-%S")
        name = "/tmp/ngrep-{}-{}-output".format(test_name, date_time)
        shutil.make_archive(name, "zip", "/tmp/sys-logs")
        shutil.rmtree("/tmp/sys-logs")
        print("ngrep logs copied here {}.zip\n".format(name))
 def start_sync_gateway(self, cluster_config, url, config):
     """Start sync_gateway on the node serving `url` with the given config.

     Raises:
         ProvisioningError: if the start playbook fails.
     """
     host = hostname_for_url(cluster_config, url)
     log_info("Starting sync_gateway on {} ...".format(host))
     runner = AnsibleRunner(cluster_config)
     conf_path = os.path.abspath(config)
     status = runner.run_ansible_playbook(
         "start-sync-gateway.yml",
         extra_vars={"sync_gateway_config_filepath": conf_path},
         subset=host,
     )
     if status != 0:
         raise ProvisioningError("Could not start sync_gateway")
    def upgrade_sync_gateways(self,
                              cluster_config,
                              sg_conf,
                              sync_gateway_version,
                              url=None):
        """ Upgrade sync gateways in a cluster. If url is passed, upgrade
            the sync gateway at that url

        Raises:
            ProvisioningError: if the upgrade playbook fails.
        """
        ansible_runner = AnsibleRunner(cluster_config)

        # Local import, presumably to avoid a circular import at module
        # load time -- TODO confirm
        from libraries.provision.install_sync_gateway import SyncGatewayConfig
        version, build = version_and_build(sync_gateway_version)
        sg_config = SyncGatewayConfig(commit=None,
                                      version_number=version,
                                      build_number=build,
                                      config_path=sg_conf,
                                      build_flags="",
                                      skip_bucketcreation=False)
        sg_conf = os.path.abspath(sg_config.config_path)

        sync_gateway_base_url, sync_gateway_package_name, sg_accel_package_name = sg_config.sync_gateway_base_url_and_package(
        )

        # Shared vars
        playbook_vars = {
            "couchbase_sync_gateway_package_base_url": sync_gateway_base_url,
            "couchbase_sync_gateway_package": sync_gateway_package_name,
            "couchbase_sg_accel_package": sg_accel_package_name,
        }

        if url is not None:
            target = hostname_for_url(cluster_config, url)
            log_info(
                "Upgrading sync_gateway/sg_accel on {} ...".format(target))
            status = ansible_runner.run_ansible_playbook(
                "upgrade-sg-sgaccel-package.yml",
                subset=target,
                extra_vars=playbook_vars)
            log_info("Completed upgrading {}".format(url))
        else:
            log_info("Upgrading all sync_gateways/sg_accels")
            status = ansible_runner.run_ansible_playbook(
                "upgrade-sg-sgaccel-package.yml", extra_vars=playbook_vars)
            log_info("Completed upgrading all sync_gateways/sg_accels")
        if status != 0:
            # ProvisioningError (an Exception subclass) for consistency with
            # the module's other provisioning helpers; callers catching
            # Exception still work
            raise ProvisioningError("Could not upgrade sync_gateway/sg_accel")
 def __init__(self, cluster_config, target):
     """Bind this client to one sync_gateway node (admin port 4984)."""
     self.ansible_runner = AnsibleRunner(cluster_config)
     node_ip = target["ip"]
     self.ip = node_ip
     self.url = "http://{}:4984".format(node_ip)
     self.hostname = target["name"]
     self._headers = {'Content-Type': 'application/json'}
     self.admin = Admin(self)
    def __init__(self, version_build, host, port, storage_engine):
        """Write an ansible inventory for the target Windows host and bind
        an AnsibleRunner to it.

        Requires LITESERV_MSFT_HOST_USER and LITESERV_MSFT_HOST_PASSWORD in
        the environment.

        Raises:
            LiteServError: if either environment variable is missing.
        """
        # Initialize baseclass properties
        super(LiteServNetMsft, self).__init__(version_build, host, port, storage_engine)

        if "LITESERV_MSFT_HOST_USER" not in os.environ:
            raise LiteServError("Make sure you define 'LITESERV_MSFT_HOST_USER' as the windows user for the host you are targeting")

        if "LITESERV_MSFT_HOST_PASSWORD" not in os.environ:
            # Fixed copy-pasted message: this variable is the password, not the user
            raise LiteServError("Make sure you define 'LITESERV_MSFT_HOST_PASSWORD' as the windows password for the host you are targeting")

        # Create config for LiteServ Windows host
        ansible_liteserv_mfst_target_lines = [
            "[windows]",
            "win1 ansible_host={}".format(host),
            "[windows:vars]",
            "ansible_user={}".format(os.environ["LITESERV_MSFT_HOST_USER"]),
            "ansible_password={}".format(os.environ["LITESERV_MSFT_HOST_PASSWORD"]),
            "ansible_port=5986",
            "ansible_connection=winrm",
            "# The following is necessary for Python 2.7.9+ when using default WinRM self-signed certificates:",
            "ansible_winrm_server_cert_validation=ignore",
        ]

        ansible_liteserv_mfst_target_string = "\n".join(ansible_liteserv_mfst_target_lines)
        log_info("Writing: {}".format(ansible_liteserv_mfst_target_string))
        config_location = "resources/liteserv_configs/net-msft"

        with open(config_location, "w") as f:
            f.write(ansible_liteserv_mfst_target_string)

        self.ansible_runner = AnsibleRunner(config=config_location)
    def restart_sync_gateways(self, cluster_config, url=None):
        """ Restart sync gateways in a cluster. If url is passed, restart
         the sync gateway at that url
        """
        runner = AnsibleRunner(cluster_config)

        if url is None:
            log_info("Restarting all sync_gateways")
            status = runner.run_ansible_playbook("restart-sync-gateway.yml")
        else:
            target = hostname_for_url(cluster_config, url)
            log_info("Restarting sync_gateway on {} ...".format(target))
            status = runner.run_ansible_playbook(
                "restart-sync-gateway.yml", subset=target)

        if status != 0:
            raise ProvisioningError("Could not restart sync_gateway")
def kill_gateload():
    """Kill any running gateload processes on the cluster named by the
    CLUSTER_CONFIG environment variable; exits the process if it is unset.
    """
    cluster_config = os.environ.get("CLUSTER_CONFIG")
    if cluster_config is None:
        print(
            "Make sure CLUSTER_CONFIG is defined and pointing to the configuration you would like to provision"
        )
        sys.exit(1)

    print("Running perf test against cluster: {}".format(cluster_config))
    runner = AnsibleRunner(cluster_config)

    status = runner.run_ansible_playbook("kill-gateload.yml")
    if status != 0:
        # Non-zero usually just means gateload was not running any more
        print(
            "Killing gateload returned non-zero status: {}.  Most likely it was no longer running"
            .format(status))
    def enable_import_xattrs(self,
                             cluster_config,
                             sg_conf,
                             url,
                             enable_import=False):
        """Deploy an SG config with xattrs enabled
            Will also enable import if enable_import is set to True
            It is used to enable xattrs and import in the SG config

        Raises:
            ProvisioningError: if the deploy playbook fails.
        """
        ansible_runner = AnsibleRunner(cluster_config)
        server_port = 8091
        server_scheme = "http"

        if is_cbs_ssl_enabled(cluster_config):
            server_port = 18091
            server_scheme = "https"

        # Shared vars
        playbook_vars = {
            "sync_gateway_config_filepath": sg_conf,
            "server_port": server_port,
            "server_scheme": server_scheme,
            "autoimport": "",
            "xattrs": ""
        }

        # Evaluate the cluster flag once instead of twice
        if is_xattrs_enabled(cluster_config):
            playbook_vars["xattrs"] = '"enable_shared_bucket_access": true,'
            if enable_import:
                playbook_vars["autoimport"] = '"import_docs": "continuous",'

        # Deploy config
        if url is not None:
            target = hostname_for_url(cluster_config, url)
            log_info("Deploying sync_gateway config on {} ...".format(target))
            status = ansible_runner.run_ansible_playbook(
                "deploy-sync-gateway-config.yml",
                subset=target,
                extra_vars=playbook_vars)
        else:
            log_info("Deploying config on all sync_gateways")
            status = ansible_runner.run_ansible_playbook(
                "deploy-sync-gateway-config.yml", extra_vars=playbook_vars)
        if status != 0:
            # ProvisioningError (an Exception subclass) for consistency with
            # the module's other helpers; callers catching Exception still work
            raise ProvisioningError("Could not deploy config to sync_gateway")
# --- Beispiel #20 (0) --- scraper example marker, commented out so the file parses
    def __init__(self, cluster_config, target):
        """Bind to one sync_gateway node and record cluster server info."""
        self.ansible_runner = AnsibleRunner(cluster_config)
        node_ip = target["ip"]
        self.ip = node_ip
        self.url = "http://{}:4984".format(node_ip)
        self.hostname = target["name"]
        self._headers = {'Content-Type': 'application/json'}
        self.admin = Admin(self)

        self.cluster_config = cluster_config
        # Plain-HTTP defaults; switched when the cluster runs SSL
        if is_cbs_ssl_enabled(self.cluster_config):
            self.server_port = 18091
            self.server_scheme = "https"
        else:
            self.server_port = 8091
            self.server_scheme = "http"

        self.couchbase_server_primary_node = add_cbs_to_sg_config_server_field(
            self.cluster_config)
def fetch_sync_gateway_profile(cluster_config, folder_name):
    """Pull the sync_gateway profile via ansible and move it into the
    performance results folder for `folder_name`.

    Raises:
        AssertionError: if the fetch playbook fails.
    """
    ansible_runner = AnsibleRunner(config=cluster_config)

    print("\n")

    print("Pulling sync_gateway profile ...")
    # fetch logs from sync_gateway instances
    status = ansible_runner.run_ansible_playbook("fetch-sync-gateway-profile.yml")
    # Raise explicitly instead of using `assert`, which is stripped under -O
    if status != 0:
        raise AssertionError("Failed to fetch sync_gateway profile")

    # zip logs and timestamp
    if os.path.isdir("/tmp/sync_gateway_profile"):

        # Move perf logs to performance_results
        shutil.move("/tmp/sync_gateway_profile", "testsuites/syncgateway/performance/results/{}/".format(folder_name))

    print("\n")
def fetch_machine_stats(cluster_config, folder_name):
    """Pull machine stats via ansible and move them into the performance
    results folder for `folder_name`.

    Raises:
        AssertionError: if the fetch playbook fails.
    """
    ansible_runner = AnsibleRunner(config=cluster_config)

    print("\n")

    print("Pulling logs")
    # fetch logs from sync_gateway instances
    status = ansible_runner.run_ansible_playbook("fetch-machine-stats.yml")
    # Raise explicitly instead of using `assert`, which is stripped under -O
    if status != 0:
        raise AssertionError("Failed to fetch machine stats")

    # zip logs and timestamp
    if os.path.isdir("/tmp/perf_logs"):

        # Move perf logs to performance_results
        shutil.move("/tmp/perf_logs", "testsuites/syncgateway/performance/results/{}/".format(folder_name))

    print("\n")
def fetch_sync_gateway_profile(cluster_config, folder_name):
    """Pull the sync_gateway profile, retrying up to 20 times with a 30s
    pause between attempts, and move it into the performance results dir.

    Logs and gives up (without raising) after exhausting all attempts.
    """
    max_num_attempts = 20

    for attempt_number in range(max_num_attempts):

        try:

            ansible_runner = AnsibleRunner(config=cluster_config)

            print("\n")

            print("Pulling sync_gateway profile.  Attempt #{}".format(
                attempt_number))
            # fetch logs from sync_gateway instances
            status = ansible_runner.run_ansible_playbook(
                "fetch-sync-gateway-profile.yml")
            if status != 0:
                raise Exception("Failed to fetch sync_gateway profile")

            # zip logs and timestamp
            if os.path.isdir("/tmp/sync_gateway_profile"):

                # Move perf logs to performance_results
                shutil.move(
                    "/tmp/sync_gateway_profile",
                    "testsuites/syncgateway/performance/results/{}/".format(
                        folder_name))

            print("\n")
            return

        except Exception as e:
            # Bug fix: the exception was previously passed as a second
            # positional arg to log_info, so the {} placeholder was never
            # filled in; format it explicitly instead
            log_info(
                "Exception trying to collect sync gateway profile: {}".format(e))
            if attempt_number < (max_num_attempts - 1):
                log_info("Going to retry.  So far {}/{} attempts".format(
                    attempt_number, max_num_attempts))
                time.sleep(30)
            else:
                log_info("Exhausted attempts.  Giving up")
        # Removed the original trailing `attempt_number += 1`: `range`
        # rebinds the loop variable each iteration, so it had no effect
# --- Beispiel #24 (0) --- scraper example marker, commented out so the file parses
def fetch_machine_stats(cluster_config, folder_name):
    """Pull machine stats via ansible and move them into the performance
    results folder for `folder_name`.

    Raises:
        AssertionError: if the fetch playbook fails.
    """
    ansible_runner = AnsibleRunner(config=cluster_config)

    print("\n")

    print("Pulling logs")
    # fetch logs from sync_gateway instances
    status = ansible_runner.run_ansible_playbook("fetch-machine-stats.yml")
    # Raise explicitly instead of using `assert`, which is stripped under -O
    if status != 0:
        raise AssertionError("Failed to fetch machine stats")

    # zip logs and timestamp
    if os.path.isdir("/tmp/perf_logs"):

        # Move perf logs to performance_results
        shutil.move(
            "/tmp/perf_logs",
            "testsuites/syncgateway/performance/results/{}/".format(
                folder_name))

    print("\n")
# --- Beispiel #25 (0) --- scraper example marker, commented out so the file parses
    def __init__(self, version_build, host, port, storage_engine):
        """Write an ansible inventory for the target Windows host and bind
        an AnsibleRunner to it.

        Requires LITESERV_MSFT_HOST_USER and LITESERV_MSFT_HOST_PASSWORD in
        the environment.

        Raises:
            LiteServError: if either environment variable is missing.
        """
        # Initialize baseclass properties
        super(LiteServNetMsft, self).__init__(version_build, host, port,
                                              storage_engine)

        if "LITESERV_MSFT_HOST_USER" not in os.environ:
            raise LiteServError(
                "Make sure you define 'LITESERV_MSFT_HOST_USER' as the windows user for the host you are targeting"
            )

        if "LITESERV_MSFT_HOST_PASSWORD" not in os.environ:
            # Fixed copy-pasted message: this variable is the password, not the user
            raise LiteServError(
                "Make sure you define 'LITESERV_MSFT_HOST_PASSWORD' as the windows password for the host you are targeting"
            )

        # Create config for LiteServ Windows host
        ansible_liteserv_mfst_target_lines = [
            "[windows]",
            "win1 ansible_host={}".format(host),
            "[windows:vars]",
            "ansible_user={}".format(os.environ["LITESERV_MSFT_HOST_USER"]),
            "ansible_password={}".format(
                os.environ["LITESERV_MSFT_HOST_PASSWORD"]),
            "ansible_port=5986",
            "ansible_connection=winrm",
            "# The following is necessary for Python 2.7.9+ when using default WinRM self-signed certificates:",
            "ansible_winrm_server_cert_validation=ignore",
        ]

        ansible_liteserv_mfst_target_string = "\n".join(
            ansible_liteserv_mfst_target_lines)
        log_info("Writing: {}".format(ansible_liteserv_mfst_target_string))
        config_location = "resources/liteserv_configs/net-msft"

        with open(config_location, "w") as f:
            f.write(ansible_liteserv_mfst_target_string)

        self.ansible_runner = AnsibleRunner(config=config_location)
# --- Beispiel #26 (0) --- scraper example marker, commented out so the file parses
def run_sgload_perf_test(cluster_config, sgload_arg_list_main,
                         skip_build_sgload):
    """Run the sgload perf test against `cluster_config`: start profile
    collection, install telegraf, optionally build sgload, then drive the
    load from the load generators against the first sync_gateway host.

    Raises:
        AssertionError: if profile collection cannot be started.
        ProvisioningError: if the telegraf install fails.
    """
    print(
        "Running sgload perf test against cluster: {}".format(cluster_config))
    main_ansible_runner = AnsibleRunner(cluster_config)

    print(">>> Starting profile collection scripts")
    status = main_ansible_runner.run_ansible_playbook(
        "start-profile-collection.yml",
        extra_vars={
            "stats_run_time": 3600,
            "delay_profiling_secs": 60
        },
    )
    # Raise explicitly instead of using `assert`, which is stripped under -O
    if status != 0:
        raise AssertionError("Could not start profiling collection scripts")

    # Install + configure telegraf
    status = main_ansible_runner.run_ansible_playbook("install-telegraf.yml")
    if status != 0:
        raise ProvisioningError("Failed to install telegraf")

    # build_sgload (ansible)
    if not skip_build_sgload:
        build_sgload(main_ansible_runner)

    # get load generator and sg hostnames
    lg_hosts_main = get_load_generators_hosts(cluster_config)
    sg_hosts_main = get_sync_gateways_hosts(cluster_config)

    # Get the first SG host from the list of SG hosts
    sg_host_main = sg_hosts_main[0]

    run_sgload_on_single_loadgenerator(lg_hosts_main, sgload_arg_list_main,
                                       sg_host_main)

    log_info("Finished")
    # NOTE(review): this is the body of a script entry point whose enclosing
    # definition (`def main` / `if __name__ == "__main__":`) lies outside this
    # chunk, and whose tail is also cut off -- code left byte-identical,
    # comments only added.
    # parser.add_argument(), and the rest of the arguments will get read into
    # sgload_arg_list_main and then passed to sgload when invoked

    parser = argparse.ArgumentParser()
    parser.add_argument('--skip-build-sgload', action='store_true')
    # parse_known_args() returns (known_args, remaining_argv); the remainder
    # becomes the sgload argument list
    args = parser.parse_known_args()
    known_args, sgload_arg_list_main = args  # unroll this tuple into named args

    try:
        main_cluster_config = os.environ["CLUSTER_CONFIG"]
    except KeyError:
        print ("Make sure CLUSTER_CONFIG is defined and pointing to the configuration you would like to provision")
        sys.exit(1)

    print("Running sgload perf test against cluster: {}".format(main_cluster_config))
    main_ansible_runner = AnsibleRunner(main_cluster_config)

    # Install + configure telegraf
    status = main_ansible_runner.run_ansible_playbook("install-telegraf.yml")
    if status != 0:
        raise ProvisioningError("Failed to install telegraf")

    # build_sgload (ansible)
    if not known_args.skip_build_sgload:
        build_sgload(main_ansible_runner)

    # get load generator and sg hostnames
    lg_hosts_main = get_load_generators_hosts(main_cluster_config)
    sg_hosts_main = get_sync_gateways_hosts(main_cluster_config)

    # Get the first SG host from the list of SG hosts
def install_sync_gateway(cluster_config,
                         sync_gateway_config,
                         sg_ce=False,
                         sg_platform="centos",
                         sa_platform="centos"):
    """Install sync_gateway and sg_accel across the cluster, either from
    source (when the config carries a commit) or from a release package,
    then configure the awslogs forwarder.

    Raises:
        ProvisioningError: if any install/configure playbook fails.
    """
    log_info(sync_gateway_config)

    if sync_gateway_config.build_flags != "":
        log_warn(
            "\n\n!!! WARNING: You are building with flags: {} !!!\n\n".format(
                sync_gateway_config.build_flags))

    ansible_runner = AnsibleRunner(cluster_config)
    config_path = os.path.abspath(sync_gateway_config.config_path)
    couchbase_server_primary_node = add_cbs_to_sg_config_server_field(
        cluster_config)
    # Create buckets unless the user explicitly asked to skip this step
    if not sync_gateway_config.skip_bucketcreation:
        create_server_buckets(cluster_config, sync_gateway_config)

    server_port = 8091
    server_scheme = "http"

    if is_cbs_ssl_enabled(cluster_config):
        server_port = 18091
        server_scheme = "https"

    # Shared vars
    playbook_vars = {
        "sync_gateway_config_filepath": config_path,
        "server_port": server_port,
        "server_scheme": server_scheme,
        "autoimport": "",
        "xattrs": "",
        "no_conflicts": "",
        "couchbase_server_primary_node": couchbase_server_primary_node
    }

    if is_xattrs_enabled(cluster_config):
        playbook_vars["autoimport"] = '"import_docs": "continuous",'
        playbook_vars["xattrs"] = '"enable_shared_bucket_access": true,'

    if no_conflicts_enabled(cluster_config):
        playbook_vars["no_conflicts"] = '"allow_conflicts": false,'
    try:
        revs_limit = get_revs_limit(cluster_config)
        playbook_vars["revs_limit"] = '"revs_limit": {},'.format(revs_limit)
    except KeyError as ex:
        # `ex.message` only exists on Python 2 exceptions; str(ex) works on
        # both Python 2 and 3 (fixes AttributeError on py3)
        log_info("Keyerror in getting revs_limit{}".format(str(ex)))
    # Install Sync Gateway via Source or Package
    if sync_gateway_config.commit is not None:
        # Install from source
        playbook_vars["commit"] = sync_gateway_config.commit
        playbook_vars["build_flags"] = sync_gateway_config.build_flags

        status = ansible_runner.run_ansible_playbook(
            "install-sync-gateway-source.yml", extra_vars=playbook_vars)
        if status != 0:
            raise ProvisioningError("Failed to install sync_gateway source")

    else:
        # Install from Package
        sync_gateway_base_url, sync_gateway_package_name, sg_accel_package_name = sync_gateway_config.sync_gateway_base_url_and_package(
            sg_ce=sg_ce, sg_platform=sg_platform, sa_platform=sa_platform)

        playbook_vars[
            "couchbase_sync_gateway_package_base_url"] = sync_gateway_base_url
        playbook_vars[
            "couchbase_sync_gateway_package"] = sync_gateway_package_name
        playbook_vars["couchbase_sg_accel_package"] = sg_accel_package_name

        if sg_platform == "windows":
            status = ansible_runner.run_ansible_playbook(
                "install-sync-gateway-package-windows.yml",
                extra_vars=playbook_vars)
        else:
            status = ansible_runner.run_ansible_playbook(
                "install-sync-gateway-package.yml", extra_vars=playbook_vars)

        if status != 0:
            raise ProvisioningError("Failed to install sync_gateway package")

        if sa_platform == "windows":
            status = ansible_runner.run_ansible_playbook(
                "install-sg-accel-package-windows.yml",
                extra_vars=playbook_vars)
        else:
            status = ansible_runner.run_ansible_playbook(
                "install-sg-accel-package.yml", extra_vars=playbook_vars)
        if status != 0:
            raise ProvisioningError("Failed to install sg_accel package")

    # Configure aws cloudwatch logs forwarder
    status = ansible_runner.run_ansible_playbook(
        "configure-sync-gateway-awslogs-forwarder.yml", extra_vars={})
    if status != 0:
        raise ProvisioningError(
            "Failed to configure sync_gateway awslogs forwarder")
# --- Beispiel #29 (0) --- scraper example marker, commented out so the file parses
def params_from_base_test_setup(request, params_from_base_suite_setup):
    """Per-test pytest fixture for upgrade tests.

    Setup (before the yield): unpacks suite-level parameters, starts
    LiteServ and deletes its databases, resolves the sync_gateway URLs,
    and persists the xattrs setting into the cluster config.

    Teardown (after the yield): deletes LiteServ databases, stops
    LiteServ, verifies the cluster is alive, fetches/analyzes logs and
    scans them for panics, data races, and OOMs.

    Raises:
        LogScanningError: if the log-scan playbook reports errors.
    """
    # Code before the yield will execute before each test starts

    # pytest command line parameters
    cluster_config = params_from_base_suite_setup["cluster_config"]
    cluster_topology = params_from_base_suite_setup["cluster_topology"]
    mode = params_from_base_suite_setup["mode"]
    xattrs_enabled = params_from_base_suite_setup["xattrs_enabled"]
    server_version = params_from_base_suite_setup["server_version"]
    sync_gateway_version = params_from_base_suite_setup["sync_gateway_version"]
    server_upgraded_version = params_from_base_suite_setup[
        "server_upgraded_version"]
    sync_gateway_upgraded_version = params_from_base_suite_setup[
        "sync_gateway_upgraded_version"]
    liteserv_host = params_from_base_suite_setup["liteserv_host"]
    liteserv_port = params_from_base_suite_setup["liteserv_port"]
    liteserv_version = params_from_base_suite_setup["liteserv_version"]
    liteserv_platform = params_from_base_suite_setup["liteserv_platform"]
    liteserv_storage_engine = params_from_base_suite_setup[
        "liteserv_storage_engine"]
    liteserv = params_from_base_suite_setup["liteserv"]
    num_docs = params_from_base_suite_setup["num_docs"]
    cbs_platform = params_from_base_suite_setup["cbs_platform"]
    cbs_toy_build = params_from_base_suite_setup["cbs_toy_build"]

    test_name = request.node.name
    log_info("Running test '{}'".format(test_name))
    log_info("cluster_config: {}".format(cluster_config))
    log_info("cluster_topology: {}".format(cluster_topology))
    log_info("mode: {}".format(mode))
    log_info("xattrs_enabled: {}".format(xattrs_enabled))

    client = MobileRestClient()

    # Start LiteServ and delete any databases
    ls_url = liteserv.start("{}/logs/{}-{}-{}.txt".format(
        RESULTS_DIR,
        type(liteserv).__name__, test_name, datetime.datetime.now()))
    client.delete_databases(ls_url)

    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(
        cluster_config=cluster_config)
    sg_url = cluster_hosts["sync_gateways"][0]["public"]
    sg_admin_url = cluster_hosts["sync_gateways"][0]["admin"]

    # Persist the xattrs choice into the cluster config so downstream
    # provisioning scripts / config templates pick it up.
    if xattrs_enabled:
        log_info("Running upgrade with xattrs for sync meta storage")
        persist_cluster_config_environment_prop(cluster_config,
                                                'xattrs_enabled', True)
    else:
        log_info("Using document storage for sync meta data")
        persist_cluster_config_environment_prop(cluster_config,
                                                'xattrs_enabled', False)

    # This dictionary is passed to each test
    yield {
        "cluster_config": cluster_config,
        "cluster_topology": cluster_topology,
        "mode": mode,
        "xattrs_enabled": xattrs_enabled,
        "server_version": server_version,
        "sync_gateway_version": sync_gateway_version,
        "server_upgraded_version": server_upgraded_version,
        "sync_gateway_upgraded_version": sync_gateway_upgraded_version,
        "liteserv_host": liteserv_host,
        "liteserv_port": liteserv_port,
        "liteserv_version": liteserv_version,
        "liteserv_platform": liteserv_platform,
        "liteserv_storage_engine": liteserv_storage_engine,
        "ls_url": ls_url,
        "sg_url": sg_url,
        "sg_admin_url": sg_admin_url,
        "num_docs": num_docs,
        "cbs_platform": cbs_platform,
        "cbs_toy_build": cbs_toy_build
    }

    client.delete_databases(ls_url)
    liteserv.stop()

    # Code after the yield will execute when each test finishes
    log_info("Tearing down test '{}'".format(test_name))

    network_utils = NetworkUtils()
    network_utils.list_connections()

    # Verify all sync_gateways and sg_accels are reachable
    c = cluster.Cluster(cluster_config)
    errors = c.verify_alive(mode)

    # Fetch logs
    logging_helper = Logging()
    logging_helper.fetch_and_analyze_logs(cluster_config=cluster_config,
                                          test_name=test_name)

    assert len(errors) == 0

    # Scan logs
    # SG logs for panic, data race
    # System logs for OOM
    ansible_runner = AnsibleRunner(cluster_config)
    script_name = "{}/utilities/check_logs.sh".format(os.getcwd())
    status = ansible_runner.run_ansible_playbook(
        "check-logs.yml", extra_vars={"script_name": script_name})

    if status != 0:
        raise LogScanningError("Errors found in the logs")
 def stop_packet_capture(self, cluster_config):
     """Stop the ngrep packet capture on the cluster hosts.

     Raises:
         ProvisioningError: if the stop-ngrep playbook exits non-zero.
     """
     runner = AnsibleRunner(config=cluster_config)
     exit_code = runner.run_ansible_playbook("stop-ngrep.yml")
     if exit_code != 0:
         raise ProvisioningError("Failed to stop packet capture")
 def stop_packet_capture(self, cluster_config):
     """Halt any running ngrep capture via the stop-ngrep playbook."""
     playbook_status = AnsibleRunner(
         config=cluster_config).run_ansible_playbook("stop-ngrep.yml")
     if playbook_status != 0:
         raise ProvisioningError("Failed to stop packet capture")
Beispiel #32
0
class SgAccel:
    """Remote-control wrapper for a single sg_accel node.

    Drives the node via Ansible playbooks and exposes its admin REST
    endpoint (port 4985).
    """

    def __init__(self, cluster_config, target):
        self.ansible_runner = AnsibleRunner(cluster_config)
        self.ip = target["ip"]
        self.url = "http://{}:4985".format(target["ip"])
        self.hostname = target["name"]
        self.cluster_config = cluster_config
        # Couchbase Server admin port/scheme; switched below when the
        # cluster is configured for CBS SSL.
        self.server_port = 8091
        self.server_scheme = "http"

        if is_cbs_ssl_enabled(self.cluster_config):
            self.server_port = 18091
            self.server_scheme = "https"

    def info(self):
        """Return the response body of the sg_accel root endpoint."""
        r = requests.get(self.url)
        r.raise_for_status()
        return r.text

    def stop(self):
        """Stop sg_accel on this host; return the playbook exit status."""
        status = self.ansible_runner.run_ansible_playbook("stop-sg-accel.yml",
                                                          subset=self.hostname)
        return status

    def start(self, config):
        """Start sg_accel with the given config file.

        Args:
            config: path to the sg_accel JSON config template.

        Returns:
            The ansible playbook exit status (0 on success).
        """
        conf_path = os.path.abspath(config)

        log.info(
            ">>> Starting sg_accel with configuration: {}".format(conf_path))
        couchbase_server_primary_node = add_cbs_to_sg_config_server_field(
            self.cluster_config)
        playbook_vars = {
            "sync_gateway_config_filepath": conf_path,
            "server_port": self.server_port,
            "server_scheme": self.server_scheme,
            "autoimport": "",
            "xattrs": "",
            "no_conflicts": "",
            "revs_limit": "",
            "couchbase_server_primary_node": couchbase_server_primary_node
        }
        # Enable xattr-backed sync metadata + continuous import if requested.
        if is_xattrs_enabled(self.cluster_config):
            playbook_vars["autoimport"] = '"import_docs": "continuous",'
            playbook_vars["xattrs"] = '"enable_shared_bucket_access": true,'

        if no_conflicts_enabled(self.cluster_config):
            playbook_vars["no_conflicts"] = '"allow_conflicts": false,'
        try:
            revs_limit = get_revs_limit(self.cluster_config)
            playbook_vars["revs_limit"] = '"revs_limit": {},'.format(
                revs_limit)
        except KeyError as ex:
            # revs_limit is optional; log and continue with the default.
            # Fix: KeyError has no .message attribute on Python 3 --
            # format the exception itself instead of ex.message.
            log.info("Keyerror in getting revs_limit{}".format(ex))

        status = self.ansible_runner.run_ansible_playbook(
            "start-sg-accel.yml",
            extra_vars=playbook_vars,
            subset=self.hostname)
        return status

    def __repr__(self):
        return "SgAccel: {}:{}\n".format(self.hostname, self.ip)
Beispiel #33
0
    def reset(self, sg_config_path):
        """Reset the cluster to a clean state and restart all services.

        Stops sync_gateway / sg_accel, deletes their artifacts, recreates
        the Couchbase Server buckets named in *sg_config_path*, then starts
        sync_gateway (and sg_accel when running in "di" mode).

        Args:
            sg_config_path: path to the sync_gateway config template.

        Returns:
            The cluster mode parsed from the config (e.g. "di" or channel
            cache mode).

        Raises:
            ProvisioningError: if the config does not validate against the
                cluster topology.
        """

        ansible_runner = AnsibleRunner(self._cluster_config)

        log_info(">>> Reseting cluster ...")
        log_info(">>> CBS SSL enabled: {}".format(self.cbs_ssl))
        log_info(">>> Using xattrs: {}".format(self.xattrs))

        # Stop sync_gateways
        log_info(">>> Stopping sync_gateway")
        status = ansible_runner.run_ansible_playbook("stop-sync-gateway.yml")
        assert status == 0, "Failed to stop sync gateway"

        # Stop sync_gateway accels
        log_info(">>> Stopping sg_accel")
        status = ansible_runner.run_ansible_playbook("stop-sg-accel.yml")
        assert status == 0, "Failed to stop sg_accel"

        # Deleting sync_gateway artifacts
        log_info(">>> Deleting sync_gateway artifacts")
        status = ansible_runner.run_ansible_playbook(
            "delete-sync-gateway-artifacts.yml")
        assert status == 0, "Failed to delete sync_gateway artifacts"

        # Deleting sg_accel artifacts
        log_info(">>> Deleting sg_accel artifacts")
        status = ansible_runner.run_ansible_playbook(
            "delete-sg-accel-artifacts.yml")
        assert status == 0, "Failed to delete sg_accel artifacts"

        # Delete buckets
        log_info(">>> Deleting buckets on: {}".format(self.servers[0].url))
        self.servers[0].delete_buckets()

        # Parse config and grab bucket names
        config_path_full = os.path.abspath(sg_config_path)
        config = Config(config_path_full)
        mode = config.get_mode()
        bucket_name_set = config.get_bucket_name_set()

        self.sync_gateway_config = config

        is_valid, reason = validate_cluster(self.sync_gateways, self.sg_accels,
                                            config)
        if not is_valid:
            raise ProvisioningError(reason)

        log_info(">>> Creating buckets on: {}".format(self.servers[0].url))
        log_info(">>> Creating buckets {}".format(bucket_name_set))
        self.servers[0].create_buckets(bucket_name_set)

        # Wait for server to be in a warmup state to work around
        # https://github.com/couchbase/sync_gateway/issues/1745
        log_info(">>> Waiting for Server: {} to be in a healthy state".format(
            self.servers[0].url))
        self.servers[0].wait_for_ready_state()

        log_info(">>> Starting sync_gateway with configuration: {}".format(
            config_path_full))

        # Couchbase Server admin endpoint; use the SSL port/scheme when the
        # cluster is configured for CBS SSL.
        server_port = 8091
        server_scheme = "http"
        couchbase_server_primary_node = add_cbs_to_sg_config_server_field(
            self._cluster_config)
        if self.cbs_ssl:
            server_port = 18091
            server_scheme = "https"

        # Start sync-gateway
        playbook_vars = {
            "sync_gateway_config_filepath": config_path_full,
            "server_port": server_port,
            "server_scheme": server_scheme,
            "autoimport": "",
            "xattrs": "",
            "no_conflicts": "",
            "revs_limit": "",
            "couchbase_server_primary_node": couchbase_server_primary_node
        }

        # Add configuration to run with xattrs
        if self.xattrs:
            playbook_vars["autoimport"] = '"import_docs": "continuous",'
            playbook_vars["xattrs"] = '"enable_shared_bucket_access": true,'

        if no_conflicts_enabled(self._cluster_config):
            playbook_vars["no_conflicts"] = '"allow_conflicts": false,'
        try:
            revs_limit = get_revs_limit(self._cluster_config)
            playbook_vars["revs_limit"] = '"revs_limit": {},'.format(
                revs_limit)
        except KeyError as ex:
            # revs_limit is optional; log and continue with the default.
            # Fix: KeyError has no .message attribute on Python 3 --
            # format the exception itself instead of ex.message.
            log_info("Keyerror in getting revs_limit{}".format(ex))
        status = ansible_runner.run_ansible_playbook("start-sync-gateway.yml",
                                                     extra_vars=playbook_vars)
        assert status == 0, "Failed to start to Sync Gateway"

        # HACK - only enable sg_accel for distributed index tests
        # revise this with https://github.com/couchbaselabs/sync-gateway-testcluster/issues/222
        if mode == "di":
            # Start sg-accel
            status = ansible_runner.run_ansible_playbook(
                "start-sg-accel.yml", extra_vars=playbook_vars)
            assert status == 0, "Failed to start sg_accel"

        # Validate CBGT
        if mode == "di":
            if not self.validate_cbgt_pindex_distribution_retry(
                    len(self.sg_accels)):
                self.save_cbgt_diagnostics()
                raise Exception("Failed to validate CBGT Pindex distribution")
            log_info(">>> Detected valid CBGT Pindex distribution")
        else:
            log_info(">>> Running in channel cache")

        return mode
Beispiel #34
0
class SyncGateway:
    def __init__(self, cluster_config, target):
        """Bind this wrapper to the sync_gateway node described by *target*."""
        self.ansible_runner = AnsibleRunner(cluster_config)
        self.ip = target["ip"]
        self.url = "http://{}:4984".format(target["ip"])
        self.hostname = target["name"]
        self._headers = {'Content-Type': 'application/json'}
        self.admin = Admin(self)

        self.cluster_config = cluster_config
        # Couchbase Server admin endpoint: use the SSL port/scheme when
        # the cluster is configured for CBS SSL.
        if is_cbs_ssl_enabled(self.cluster_config):
            self.server_port = 18091
            self.server_scheme = "https"
        else:
            self.server_port = 8091
            self.server_scheme = "http"

        self.couchbase_server_primary_node = add_cbs_to_sg_config_server_field(
            self.cluster_config)

    def info(self):
        """GET the sync_gateway root endpoint and return its body text."""
        response = requests.get(self.url)
        response.raise_for_status()
        return response.text

    def stop(self):
        """Stop sync_gateway on this host; return the playbook exit status."""
        return self.ansible_runner.run_ansible_playbook(
            "stop-sync-gateway.yml", subset=self.hostname)

    def start(self, config):
        """Start sync_gateway on this host with the given config file.

        Args:
            config: path to the sync_gateway JSON config template.

        Returns:
            The ansible playbook exit status (0 on success).
        """
        conf_path = os.path.abspath(config)
        log.info(">>> Starting sync_gateway with configuration: {}".format(
            conf_path))

        playbook_vars = {
            "sync_gateway_config_filepath": conf_path,
            "server_port": self.server_port,
            "server_scheme": self.server_scheme,
            "autoimport": "",
            "xattrs": "",
            "no_conflicts": "",
            # Fix: default "revs_limit" so the playbook template always has
            # the variable defined, matching restart() and SgAccel.start().
            "revs_limit": "",
            "couchbase_server_primary_node": self.couchbase_server_primary_node
        }

        # Enable xattr-backed sync metadata + continuous import if requested.
        if is_xattrs_enabled(self.cluster_config):
            playbook_vars["autoimport"] = '"import_docs": "continuous",'
            playbook_vars["xattrs"] = '"enable_shared_bucket_access": true,'

        if no_conflicts_enabled(self.cluster_config):
            playbook_vars["no_conflicts"] = '"allow_conflicts": false,'
        try:
            revs_limit = get_revs_limit(self.cluster_config)
            playbook_vars["revs_limit"] = '"revs_limit": {},'.format(
                revs_limit)
        except KeyError as ex:
            # revs_limit is optional; log and continue with the default.
            # Fix: KeyError has no .message attribute on Python 3 --
            # format the exception itself instead of ex.message.
            log_info("Keyerror in getting revs_limit{}".format(ex))
        status = self.ansible_runner.run_ansible_playbook(
            "start-sync-gateway.yml",
            extra_vars=playbook_vars,
            subset=self.hostname)
        return status

    def restart(self, config, cluster_config=None):
        """Restart sync_gateway via the reset-sync-gateway playbook.

        Args:
            config: path to the sync_gateway JSON config template.
            cluster_config: optional replacement cluster config to use
                from this call onward.

        Returns:
            The ansible playbook exit status (0 on success).
        """
        if cluster_config is not None:
            # NOTE(review): server_port/server_scheme are NOT recomputed
            # here, so a cluster_config with a different SSL setting keeps
            # the old port/scheme -- confirm this is intended.
            self.cluster_config = cluster_config
        conf_path = os.path.abspath(config)
        log.info(">>> Restarting sync_gateway with configuration: {}".format(
            conf_path))

        playbook_vars = {
            "sync_gateway_config_filepath": conf_path,
            "server_port": self.server_port,
            "server_scheme": self.server_scheme,
            "autoimport": "",
            "xattrs": "",
            "no_conflicts": "",
            "revs_limit": "",
            "couchbase_server_primary_node": self.couchbase_server_primary_node
        }

        # Enable xattr-backed sync metadata + continuous import if requested.
        if is_xattrs_enabled(self.cluster_config):
            playbook_vars["autoimport"] = '"import_docs": "continuous",'
            playbook_vars["xattrs"] = '"enable_shared_bucket_access": true,'

        if no_conflicts_enabled(self.cluster_config):
            playbook_vars["no_conflicts"] = '"allow_conflicts": false,'
        try:
            revs_limit = get_revs_limit(self.cluster_config)
            playbook_vars["revs_limit"] = '"revs_limit": {},'.format(
                revs_limit)
        except KeyError as ex:
            # revs_limit is optional; log and continue with the default.
            # Fix: KeyError has no .message attribute on Python 3 --
            # format the exception itself instead of ex.message.
            log_info("Keyerror in getting revs_limit{}".format(ex))

        status = self.ansible_runner.run_ansible_playbook(
            "reset-sync-gateway.yml",
            extra_vars=playbook_vars,
            subset=self.hostname)
        return status

    def verify_launched(self):
        """Check the root endpoint responds; raise on an HTTP error status."""
        response = requests.get(self.url)
        log.info("GET {} ".format(response.url))
        log.info("{}".format(response.text))
        response.raise_for_status()

    def create_db(self, name):
        """Create database *name* via the admin API; return its response."""
        return self.admin.create_db(name)

    def delete_db(self, name):
        """Delete database *name* via the admin API; return its response."""
        return self.admin.delete_db(name)

    def reset(self):
        """Delete every database currently defined on this node."""
        for db_name in self.admin.get_dbs():
            self.admin.delete_db(db_name)

    def start_push_replication(self,
                               target,
                               source_db,
                               target_db,
                               continuous=True,
                               use_remote_source=False,
                               channels=None,
                               async=False,
                               use_admin_url=False):
class SyncGateway:

    def __init__(self, cluster_config, target):
        """Wrap the sync_gateway node described by *target* (port 4984)."""
        self.ip = target["ip"]
        self.url = "http://{}:4984".format(target["ip"])
        self.hostname = target["name"]
        self._headers = {'Content-Type': 'application/json'}
        self.ansible_runner = AnsibleRunner(cluster_config)
        self.admin = Admin(self)

    def info(self):
        """Return the body text of the sync_gateway root endpoint."""
        response = requests.get(self.url)
        response.raise_for_status()
        return response.text

    def stop(self):
        """Stop this sync_gateway instance; return the playbook status."""
        return self.ansible_runner.run_ansible_playbook(
            "stop-sync-gateway.yml", subset=self.hostname)

    def start(self, config):
        """Start sync_gateway on this host with *config*; return status."""
        conf_path = os.path.abspath(config)
        log.info(">>> Starting sync_gateway with configuration: {}".format(conf_path))

        extra_vars = {"sync_gateway_config_filepath": conf_path}
        return self.ansible_runner.run_ansible_playbook(
            "start-sync-gateway.yml",
            extra_vars=extra_vars,
            subset=self.hostname
        )

    def restart(self, config):
        """Restart sync_gateway with *config* via reset-sync-gateway."""
        conf_path = os.path.abspath(config)
        log.info(">>> Restarting sync_gateway with configuration: {}".format(conf_path))

        extra_vars = {"sync_gateway_config_filepath": conf_path}
        return self.ansible_runner.run_ansible_playbook(
            "reset-sync-gateway.yml",
            extra_vars=extra_vars,
            subset=self.hostname
        )

    def verify_launched(self):
        """Assert the root endpoint responds; raise on HTTP error status."""
        response = requests.get(self.url)
        log.info("GET {} ".format(response.url))
        log.info("{}".format(response.text))
        response.raise_for_status()

    def create_db(self, name):
        """Create database *name* via the admin API; return its response."""
        return self.admin.create_db(name)

    def delete_db(self, name):
        """Delete database *name* via the admin API; return its response."""
        return self.admin.delete_db(name)

    def reset(self):
        """Remove all databases currently defined on this node."""
        for db_name in self.admin.get_dbs():
            self.admin.delete_db(db_name)

    def start_push_replication(self,
                               target,
                               source_db,
                               target_db,
                               continuous=True,
                               use_remote_source=False,
                               channels=None,
                               async=False,
                               use_admin_url=False):
def run_perf_test(number_pullers, number_pushers, use_gateload, gen_gateload_config,
                  test_id, sync_gateway_config_path, reset_sync_gateway, doc_size, runtime_ms, rampup_interval_ms):
    """Run a sync_gateway performance test using Gateload or Gatling.

    Installs telegraf, optionally resets the cluster with the given
    sync_gateway config, drives load (Gateload or Gatling), then collects
    expvars, machine stats, profiles, logs, and cbcollect output into
    testsuites/syncgateway/performance/results/<test_run_id>/.

    Requires the CLUSTER_CONFIG environment variable; exits if missing.
    """
    try:
        cluster_config = os.environ["CLUSTER_CONFIG"]
    except KeyError:
        print ("Make sure CLUSTER_CONFIG is defined and pointing to the configuration you would like to provision")
        sys.exit(1)

    print("Running perf test against cluster: {}".format(cluster_config))
    ansible_runner = AnsibleRunner(cluster_config)

    # Install + configure telegraf
    status = ansible_runner.run_ansible_playbook("install-telegraf.yml")
    if status != 0:
        raise ProvisioningError("Failed to install telegraf")

    # Unique run id: test id + timestamp
    test_run_id = "{}_{}".format(test_id, time.strftime("%Y-%m-%d-%H-%M-%S"))

    # Create test results directory
    os.makedirs("testsuites/syncgateway/performance/results/{}".format(test_run_id))

    print("Resetting Sync Gateway")
    if sync_gateway_config_path is None or len(sync_gateway_config_path) == 0:
        raise Exception("Missing Sync Gateway config file path")
    cluster = Cluster(config=cluster_config)
    if reset_sync_gateway:
        mode = cluster.reset(sync_gateway_config_path)
        print("Running in mode: {}".format(mode))

    # Copy provisioning_config to performance_results/ folder
    shutil.copy("{}".format(cluster_config), "testsuites/syncgateway/performance/results/{}".format(test_run_id))

    if use_gateload:
        print("Using Gateload")

        if int(number_pullers) > 0 and not gen_gateload_config:
            raise Exception("You specified --num-pullers but did not set --gen-gateload-config")

        # Build gateload
        print(">>> Building gateload")
        status = ansible_runner.run_ansible_playbook(
            "build-gateload.yml",
            extra_vars={},
        )
        assert status == 0, "Could not build gateload"

        # Generate gateload config
        print(">>> Generate gateload configs")
        if gen_gateload_config:
            generate_gateload_configs.main(
                cluster_config,
                number_pullers,
                number_pushers,
                test_run_id,
                doc_size,
                runtime_ms,
                rampup_interval_ms
            )

        # Start gateload
        print(">>> Starting gateload with {0} pullers and {1} pushers".format(number_pullers, number_pushers))
        status = ansible_runner.run_ansible_playbook(
            "start-gateload.yml",
            extra_vars={},
        )
        assert status == 0, "Could not start gateload"

    else:
        print("Using Gatling")
        print(">>> Starting gatling with {0} pullers and {1} pushers".format(number_pullers, number_pushers))

        # Configure gatling
        subprocess.call(["ansible-playbook", "-i", "{}".format(cluster_config), "configure-gatling.yml"])

        # Run Gatling
        subprocess.call([
            "ansible-playbook",
            "-i", "{}".format(cluster_config),
            "run-gatling-theme.yml",
            "--extra-vars", "number_of_pullers={0} number_of_pushers={1}".format(number_pullers, number_pushers)
        ])

    # write expvars to file, will exit when gateload scenario is done
    print(">>> Logging expvars")
    log_expvars(cluster_config, test_run_id)

    # Killing sync_gateway and sg_accel will trigger collection of
    #    1) machine_stats
    #    2) sync_gateway profile data
    print(">>> Stopping Sync Gateway")
    stop_sync_gateway_status = ansible_runner.run_ansible_playbook("stop-sync-gateway.yml")
    assert stop_sync_gateway_status == 0, "Failed to stop sync_gateway"

    print(">>> Stopping SG Accel")
    stop_sg_accel_status = ansible_runner.run_ansible_playbook("stop-sg-accel.yml")
    assert stop_sg_accel_status == 0, "Failed to stop sg_accel"

    # HACK: refresh interval for resource stat collection is 10 seconds.
    #  Make sure enough time has passed before collecting json
    print(">>> Sleep for 1 minute before collecting machine stats")
    time.sleep(61)

    print(">>> Fetch machine stats")
    fetch_machine_stats(cluster_config, test_run_id)

    # Fetch profile for sync_gateway while the endpoints are still running
    print(">>> Fetch Sync Gateway profile")
    fetch_sync_gateway_profile(cluster_config, test_run_id)

    # Copy sync_gateway logs to test results directory
    print(">>> Fetch Sync Gateway logs")
    fetch_sync_gateway_logs(cluster_config, test_run_id)

    # Invoke cb-collect-info and push to support portal
    print(">>> Invoke cbcollect info and push to support portal")
    push_cbcollect_info_supportal(cluster_config)
Beispiel #37
0
    def reset(self, sg_config_path):
        """Reset the cluster: stop services, wipe artifacts and buckets,
        recreate buckets from the config, then restart sync_gateway (and
        sg_accel in "di" mode).

        Args:
            sg_config_path: path to the sync_gateway config template.

        Returns:
            The cluster mode parsed from the config (e.g. "di" or channel
            cache mode).
        """

        self.validate_cluster()

        ansible_runner = AnsibleRunner(self._cluster_config)

        # Stop sync_gateways
        log_info(">>> Stopping sync_gateway")
        status = ansible_runner.run_ansible_playbook("stop-sync-gateway.yml")
        assert status == 0, "Failed to stop sync gateway"

        # Stop sg_accels
        log_info(">>> Stopping sg_accel")
        status = ansible_runner.run_ansible_playbook("stop-sg-accel.yml")
        assert status == 0, "Failed to stop sg_accel"

        # Deleting sync_gateway artifacts
        log_info(">>> Deleting sync_gateway artifacts")
        status = ansible_runner.run_ansible_playbook("delete-sync-gateway-artifacts.yml")
        assert status == 0, "Failed to delete sync_gateway artifacts"

        # Deleting sg_accel artifacts
        log_info(">>> Deleting sg_accel artifacts")
        status = ansible_runner.run_ansible_playbook("delete-sg-accel-artifacts.yml")
        assert status == 0, "Failed to delete sg_accel artifacts"

        # Delete buckets
        log_info(">>> Deleting buckets on: {}".format(self.cb_server.url))
        self.cb_server.delete_buckets()

        # Parse config and grab bucket names
        config_path_full = os.path.abspath(sg_config_path)
        config = Config(config_path_full)
        mode = config.get_mode()
        bucket_name_set = config.get_bucket_name_set()

        self.sync_gateway_config = config

        log_info(">>> Creating buckets on: {}".format(self.cb_server.url))
        log_info(">>> Creating buckets {}".format(bucket_name_set))
        self.cb_server.create_buckets(bucket_name_set)

        # Wait for server to be in a warmup state to work around
        # https://github.com/couchbase/sync_gateway/issues/1745
        log_info(">>> Waiting for Server: {} to be in a healthy state".format(self.cb_server.url))
        self.cb_server.wait_for_ready_state()

        log_info(">>> Starting sync_gateway with configuration: {}".format(config_path_full))
        utils.dump_file_contents_to_logs(config_path_full)

        # Start sync-gateway
        status = ansible_runner.run_ansible_playbook(
            "start-sync-gateway.yml",
            extra_vars={
                "sync_gateway_config_filepath": config_path_full
            }
        )
        assert status == 0, "Failed to start to Sync Gateway"

        # HACK - only enable sg_accel for distributed index tests
        # revise this with https://github.com/couchbaselabs/sync-gateway-testcluster/issues/222
        if mode == "di":
            # Start sg-accel
            status = ansible_runner.run_ansible_playbook(
                "start-sg-accel.yml",
                extra_vars={
                    "sync_gateway_config_filepath": config_path_full
                }
            )
            assert status == 0, "Failed to start sg_accel"

        # Validate CBGT
        if mode == "di":
            if not self.validate_cbgt_pindex_distribution_retry(len(self.sg_accels)):
                self.save_cbgt_diagnostics()
                raise Exception("Failed to validate CBGT Pindex distribution")
            log_info(">>> Detected valid CBGT Pindex distribution")
        else:
            log_info(">>> Running in channel cache")

        return mode
Beispiel #38
0
def params_from_base_test_setup(request, params_from_base_suite_setup):
    """Per-test pytest fixture for load/stress tests.

    Setup (before the yield): unpacks suite-level parameters and logs the
    test context. Teardown (after the yield): verifies the cluster is
    alive, pulls logs when requested or on failure, and scans logs for
    panics, data races, and OOMs.

    Raises:
        LogScanningError: if the log-scan playbook reports errors.
    """
    # Code before the yield will execute before each test starts

    # pytest command line parameters
    collect_logs = request.config.getoption("--collect-logs")

    cluster_config = params_from_base_suite_setup["cluster_config"]
    cluster_topology = params_from_base_suite_setup["cluster_topology"]
    mode = params_from_base_suite_setup["mode"]
    xattrs_enabled = params_from_base_suite_setup["xattrs_enabled"]

    test_name = request.node.name
    log_info("Running test '{}'".format(test_name))
    log_info("cluster_config: {}".format(cluster_config))
    log_info("cluster_topology: {}".format(cluster_topology))
    log_info("mode: {}".format(mode))
    log_info("xattrs_enabled: {}".format(xattrs_enabled))

    # This dictionary is passed to each test
    yield {
        "cluster_config":
        cluster_config,
        "cluster_topology":
        cluster_topology,
        "mode":
        mode,
        "xattrs_enabled":
        xattrs_enabled,
        "server_seed_docs":
        params_from_base_suite_setup["server_seed_docs"],
        "max_docs":
        params_from_base_suite_setup["max_docs"],
        "num_users":
        params_from_base_suite_setup["num_users"],
        "create_batch_size":
        params_from_base_suite_setup["create_batch_size"],
        "create_delay":
        params_from_base_suite_setup["create_delay"],
        "update_runtime_sec":
        params_from_base_suite_setup["update_runtime_sec"],
        "update_batch_size":
        params_from_base_suite_setup["update_batch_size"],
        "update_docs_percentage":
        params_from_base_suite_setup["update_docs_percentage"],
        "update_delay":
        params_from_base_suite_setup["update_delay"],
        "changes_delay":
        params_from_base_suite_setup["changes_delay"],
        "changes_limit":
        params_from_base_suite_setup["changes_limit"]
    }

    # Code after the yield will execute when each test finishes
    log_info("Tearing down test '{}'".format(test_name))

    network_utils = NetworkUtils()
    network_utils.list_connections()

    # Verify all sync_gateways and sg_accels are reachable
    c = cluster.Cluster(cluster_config)
    errors = c.verify_alive(mode)

    # if the test failed or a node is down, pull logs
    logging_helper = Logging()
    if collect_logs or request.node.rep_call.failed or len(errors) != 0:
        logging_helper.fetch_and_analyze_logs(cluster_config=cluster_config,
                                              test_name=test_name)

    assert len(errors) == 0

    # Scan logs
    # SG logs for panic, data race
    # System logs for OOM
    ansible_runner = AnsibleRunner(cluster_config)
    script_name = "{}/utilities/check_logs.sh".format(os.getcwd())
    status = ansible_runner.run_ansible_playbook(
        "check-logs.yml", extra_vars={"script_name": script_name})

    if status != 0:
        logging_helper.fetch_and_analyze_logs(cluster_config=cluster_config,
                                              test_name=test_name)
        raise LogScanningError("Errors found in the logs")
Beispiel #39
0
 def __init__(self, cluster_config, target):
     """Bind to the admin endpoint (port 4985) of the *target* host."""
     self.ip = target["ip"]
     self.url = "http://{}:4985".format(target["ip"])
     self.hostname = target["name"]
     self.ansible_runner = AnsibleRunner(cluster_config)
Beispiel #40
0
 def restart_services(self):
     """Restart cluster services; assert the playbook succeeded."""
     runner = AnsibleRunner(self._cluster_config)
     exit_code = runner.run_ansible_playbook("restart-services.yml",
                                             extra_vars={})
     assert exit_code == 0, "Failed to restart services"
class LiteServNetMsft(LiteServBase):
    """Drives a .NET LiteServ listener on a remote Windows host via Ansible/WinRM.

    Credentials for the Windows host are taken from the environment variables
    LITESERV_MSFT_HOST_USER and LITESERV_MSFT_HOST_PASSWORD.  The constructor
    writes an Ansible inventory file for the target host and binds an
    AnsibleRunner to it; all lifecycle operations (download/install/start/
    stop/remove) are delegated to Ansible playbooks.
    """

    def __init__(self, version_build, host, port, storage_engine):

        # Initialize baseclass properties (version/build, endpoint, storage engine)
        super(LiteServNetMsft, self).__init__(version_build, host, port, storage_engine)

        if "LITESERV_MSFT_HOST_USER" not in os.environ:
            raise LiteServError("Make sure you define 'LITESERV_MSFT_HOST_USER' as the windows user for the host you are targeting")

        if "LITESERV_MSFT_HOST_PASSWORD" not in os.environ:
            raise LiteServError("Make sure you define 'LITESERV_MSFT_HOST_PASSWORD' as the windows user for the host you are targeting")

        # Create an Ansible inventory for the LiteServ Windows host
        # (WinRM connection on port 5986, self-signed cert validation disabled).
        ansible_liteserv_mfst_target_lines = [
            "[windows]",
            "win1 ansible_host={}".format(host),
            "[windows:vars]",
            "ansible_user={}".format(os.environ["LITESERV_MSFT_HOST_USER"]),
            "ansible_password={}".format(os.environ["LITESERV_MSFT_HOST_PASSWORD"]),
            "ansible_port=5986",
            "ansible_connection=winrm",
            "# The following is necessary for Python 2.7.9+ when using default WinRM self-signed certificates:",
            "ansible_winrm_server_cert_validation=ignore",
        ]

        ansible_liteserv_mfst_target_string = "\n".join(ansible_liteserv_mfst_target_lines)
        log_info("Writing: {}".format(ansible_liteserv_mfst_target_string))
        config_location = "resources/liteserv_configs/net-msft"

        with open(config_location, "w") as f:
            f.write(ansible_liteserv_mfst_target_string)

        self.ansible_runner = AnsibleRunner(config=config_location)

    def download(self):
        r"""
        1. Downloads the LiteServ.zip package from latestbuild to the remote Windows host to Desktop\LiteServ\
        2. Extracts the package and removes the zip

        Raises LiteServError if the remote download playbook fails.
        """

        version, build = version_and_build(self.version_build)
        download_url = "{}/couchbase-lite-net/{}/{}/LiteServ.zip".format(LATEST_BUILDS, version, build)
        package_name = "couchbase-lite-net-msft-{}-liteserv".format(self.version_build)

        # Download LiteServ via Ansible on remote machine
        status = self.ansible_runner.run_ansible_playbook("download-liteserv-msft.yml", extra_vars={
            "download_url": download_url,
            "package_name": package_name
        })

        if status != 0:
            raise LiteServError("Failed to download LiteServ package on remote machine")

    def install(self):
        """
        Installs needed packages on Windows host and removes any existing service wrappers for LiteServ.

        Raises LiteServError if the remote install playbook fails.
        """

        directory_path = "couchbase-lite-net-msft-{}-liteserv/LiteServ.exe".format(self.version_build)
        status = self.ansible_runner.run_ansible_playbook("install-liteserv-windows.yml", extra_vars={
            "directory_path": directory_path
        })

        if status != 0:
            raise LiteServError("Failed to install Liteserv on Windows host")

    def remove(self):
        """Removes the LiteServ installation from the remote Windows host."""
        log_info("Removing windows server from: {}".format(self.host))
        status = self.ansible_runner.run_ansible_playbook("remove-liteserv-msft.yml")
        if status != 0:
            # Fixed: error message previously said "install" for a removal failure.
            raise LiteServError("Failed to remove Liteserv on Windows host")

    def start(self, logfile_name):
        """
        1. Starts a LiteServ with logging to provided logfile file object.
           The running LiteServ process will be stored in the self.process property.
        2. The method will poll on the endpoint to make sure LiteServ is available.
        3. The expected version will be compared with the version reported by http://<host>:<port>
        4. Return the url of the running LiteServ
        """

        self._verify_not_running()

        self.logfile = logfile_name

        process_args = [
            "--port", str(self.port),
            "--dir", "."
        ]

        # Storage backend selection: anything that is not ForestDB falls back to SQLite.
        if self.storage_engine == "ForestDB" or self.storage_engine == "ForestDB+Encryption":
            process_args.append("--storage")
            process_args.append("ForestDB")
        else:
            process_args.append("--storage")
            process_args.append("SQLite")

        # Encrypted variants get a --dbpassword flag for every registered client db.
        if self.storage_engine == "SQLCipher" or self.storage_engine == "ForestDB+Encryption":
            log_info("Using Encryption ...")
            db_flags = []
            for db_name in REGISTERED_CLIENT_DBS:
                db_flags.append("--dbpassword")
                db_flags.append("{}=pass".format(db_name))
            process_args.extend(db_flags)

        # The package structure for LiteServ is different pre 1.4. Handle for this case
        if self.version_build.startswith("1.2") or self.version_build.startswith("1.3"):
            binary_path = "couchbase-lite-net-msft-{}-liteserv/LiteServ.exe".format(self.version_build)
        else:
            binary_path = "couchbase-lite-net-msft-{}-liteserv/net45/LiteServ.exe".format(self.version_build)

        joined_args = " ".join(process_args)
        log_info("Starting LiteServ {} with: {}".format(binary_path, joined_args))

        # Start LiteServ via Ansible on remote machine
        status = self.ansible_runner.run_ansible_playbook(
            "start-liteserv-msft.yml",
            extra_vars={
                "binary_path": binary_path,
                "launch_args": joined_args,
            }
        )
        if status != 0:
            raise LiteServError("Could not start Liteserv")

        self._verify_launched()

        return "http://{}:{}".format(self.host, self.port)

    def _verify_launched(self):
        """Poll on expected http://<host>:<port> until it is reachable.
        Assert that the response contains the expected version information.

        Raises LiteServError if the platform string or composed
        "<version>-<build>" does not match self.version_build.
        """

        resp_obj = self._wait_until_reachable()
        log_info(resp_obj)

        # Example vendor version: .NET Microsoft Windows 10.12/x86_64 1.3.1-build0013/5d1553d
        running_version = resp_obj["vendor"]["version"]

        if not running_version.startswith(".NET Microsoft Windows"):
            raise LiteServError("Invalid platform running!")

        #  ['.NET', 'Microsoft', 'Windows', '10', 'Enterprise', 'x64', '1.4.0', 'build0043', '5cfe25b']
        running_version_parts = re.split("[ /-]", running_version)
        running_version = running_version_parts[6]
        # NOTE: str.strip("build") strips the character set {b,u,i,l,d} from
        # both ends, which happens to reduce "build0043" to "0043".
        running_build = int(running_version_parts[7].strip("build"))
        running_version_composed = "{}-{}".format(running_version, running_build)

        if self.version_build != running_version_composed:
            raise LiteServError("Expected version does not match actual version: Expected={}  Actual={}".format(
                self.version_build,
                running_version_composed)
            )

    def stop(self):
        """
        Stops a .NET listener on a remote windows machine via ansible and pulls logs.
        """

        binary_path = "couchbase-lite-net-msft-{}-liteserv/LiteServ.exe".format(self.version_build)

        log_full_path = "{}/{}".format(os.getcwd(), self.logfile)

        log_info("Stopping {} on windows machine ...".format(binary_path))
        log_info("Pulling logs to {} ...".format(log_full_path))

        status = self.ansible_runner.run_ansible_playbook(
            "stop-liteserv-windows.yml",
            extra_vars={
                "binary_path": binary_path,
                "log_full_path": log_full_path
            }
        )
        if status != 0:
            # Fixed: error message previously said "start" for a stop failure.
            raise LiteServError("Could not stop Liteserv")
# --- Example #42 (scraped-snippet separator; original marker: "Beispiel #42" / score 0) ---
class LiteServNetMsft(LiteServBase):
    """Drives a .NET LiteServ listener on a remote Windows host via Ansible/WinRM.

    Refactored variant: pre/post-1.4 package layout is decided by the
    has_dot_net4_dot_5() helper.  Credentials come from the environment
    variables LITESERV_MSFT_HOST_USER and LITESERV_MSFT_HOST_PASSWORD; the
    constructor writes an Ansible inventory for the target host and binds an
    AnsibleRunner to it.
    """

    def __init__(self, version_build, host, port, storage_engine):

        # Initialize baseclass properties (version/build, endpoint, storage engine)
        super(LiteServNetMsft, self).__init__(version_build, host, port,
                                              storage_engine)

        if "LITESERV_MSFT_HOST_USER" not in os.environ:
            raise LiteServError(
                "Make sure you define 'LITESERV_MSFT_HOST_USER' as the windows user for the host you are targeting"
            )

        if "LITESERV_MSFT_HOST_PASSWORD" not in os.environ:
            raise LiteServError(
                "Make sure you define 'LITESERV_MSFT_HOST_PASSWORD' as the windows user for the host you are targeting"
            )

        # Create an Ansible inventory for the LiteServ Windows host
        # (WinRM connection on port 5986, self-signed cert validation disabled).
        ansible_liteserv_mfst_target_lines = [
            "[windows]",
            "win1 ansible_host={}".format(host),
            "[windows:vars]",
            "ansible_user={}".format(os.environ["LITESERV_MSFT_HOST_USER"]),
            "ansible_password={}".format(
                os.environ["LITESERV_MSFT_HOST_PASSWORD"]),
            "ansible_port=5986",
            "ansible_connection=winrm",
            "# The following is necessary for Python 2.7.9+ when using default WinRM self-signed certificates:",
            "ansible_winrm_server_cert_validation=ignore",
        ]

        ansible_liteserv_mfst_target_string = "\n".join(
            ansible_liteserv_mfst_target_lines)
        log_info("Writing: {}".format(ansible_liteserv_mfst_target_string))
        config_location = "resources/liteserv_configs/net-msft"

        with open(config_location, "w") as f:
            f.write(ansible_liteserv_mfst_target_string)

        self.ansible_runner = AnsibleRunner(config=config_location)

    def download(self, version_build=None):
        r"""
        1. Downloads the LiteServ.zip package from latestbuild to the remote Windows host to Desktop\LiteServ\
        2. Extracts the package and removes the zip

        If *version_build* is given it replaces self.version_build for this
        and subsequent operations.  Raises LiteServError on playbook failure.
        """
        if version_build is not None:
            self.version_build = version_build
        version, build = version_and_build(self.version_build)
        download_url = "{}/couchbase-lite-net/{}/{}/LiteServ.zip".format(
            LATEST_BUILDS, version, build)
        package_name = "couchbase-lite-net-msft-{}-liteserv".format(
            self.version_build)

        # Download LiteServ via Ansible on remote machine
        status = self.ansible_runner.run_ansible_playbook(
            "download-liteserv-msft.yml",
            extra_vars={
                "download_url": download_url,
                "package_name": package_name
            })

        if status != 0:
            raise LiteServError(
                "Failed to download LiteServ package on remote machine")

    def install(self):
        """
        Installs needed packages on Windows host and removes any existing service wrappers for LiteServ.

        Raises LiteServError if the remote install playbook fails.
        """
        # The package structure for LiteServ is different pre 1.4. Handle for this case
        if has_dot_net4_dot_5(self.version_build):
            directory_path = "couchbase-lite-net-msft-{}-liteserv/net45/LiteServ.exe".format(
                self.version_build)
        else:
            directory_path = "couchbase-lite-net-msft-{}-liteserv/LiteServ.exe".format(
                self.version_build)

        status = self.ansible_runner.run_ansible_playbook(
            "install-liteserv-windows.yml",
            extra_vars={"directory_path": directory_path})

        if status != 0:
            raise LiteServError("Failed to install Liteserv on Windows host")

    def remove(self):
        """Removes the LiteServ installation from the remote Windows host."""
        log_info("Removing windows server from: {}".format(self.host))
        status = self.ansible_runner.run_ansible_playbook(
            "remove-liteserv-msft.yml")
        if status != 0:
            # Fixed: error message previously said "install" for a removal failure.
            raise LiteServError("Failed to remove Liteserv on Windows host")

    def start(self, logfile_name):
        """
        1. Starts a LiteServ with logging to provided logfile file object.
           The running LiteServ process will be stored in the self.process property.
        2. The method will poll on the endpoint to make sure LiteServ is available.
        3. The expected version will be compared with the version reported by http://<host>:<port>
        4. Return the url of the running LiteServ
        """

        self._verify_not_running()

        self.logfile = logfile_name

        process_args = ["--port", str(self.port), "--dir", "."]

        # Storage backend selection: anything that is not ForestDB falls back to SQLite.
        if self.storage_engine == "ForestDB" or self.storage_engine == "ForestDB+Encryption":
            process_args.append("--storage")
            process_args.append("ForestDB")
        else:
            process_args.append("--storage")
            process_args.append("SQLite")

        # Encrypted variants get a --dbpassword flag for every registered client db.
        if self.storage_engine == "SQLCipher" or self.storage_engine == "ForestDB+Encryption":
            log_info("Using Encryption ...")
            db_flags = []
            for db_name in REGISTERED_CLIENT_DBS:
                db_flags.append("--dbpassword")
                db_flags.append("{}=pass".format(db_name))
            process_args.extend(db_flags)

        # The package structure for LiteServ is different pre 1.4. Handle for this case
        if has_dot_net4_dot_5(self.version_build):
            binary_path = "couchbase-lite-net-msft-{}-liteserv/net45/LiteServ.exe".format(
                self.version_build)
        else:
            binary_path = "couchbase-lite-net-msft-{}-liteserv/LiteServ.exe".format(
                self.version_build)

        joined_args = " ".join(process_args)
        log_info("Starting LiteServ {} with: {}".format(
            binary_path, joined_args))

        # Start LiteServ via Ansible on remote machine
        status = self.ansible_runner.run_ansible_playbook(
            "start-liteserv-msft.yml",
            extra_vars={
                "binary_path": binary_path,
                "launch_args": joined_args,
            })
        if status != 0:
            # Fixed: error message previously said "stop" for a start failure.
            raise LiteServError("Could not start Liteserv")

        self._verify_launched()

        return "http://{}:{}".format(self.host, self.port)

    def _verify_launched(self):
        """Poll on expected http://<host>:<port> until it is reachable.
        Assert that the response contains the expected version information.

        Raises LiteServError if the platform string or composed
        "<version>-<build>" does not match self.version_build.
        """

        resp_obj = self._wait_until_reachable()
        log_info(resp_obj)

        # Example vendor version: .NET Microsoft Windows 10.12/x86_64 1.3.1-build0013/5d1553d
        running_version = resp_obj["vendor"]["version"]

        if not running_version.startswith(".NET Microsoft Windows"):
            raise LiteServError("Invalid platform running!")

        #  ['.NET', 'Microsoft', 'Windows', '10', 'Enterprise', 'x64', '1.4.0', 'build0043', '5cfe25b']
        running_version_parts = re.split("[ /-]", running_version)
        running_version = running_version_parts[6]
        # NOTE: str.strip("build") strips the character set {b,u,i,l,d} from
        # both ends, which happens to reduce "build0043" to "0043".
        running_build = int(running_version_parts[7].strip("build"))
        running_version_composed = "{}-{}".format(running_version,
                                                  running_build)

        if self.version_build != running_version_composed:
            raise LiteServError(
                "Expected version does not match actual version: Expected={}  Actual={}"
                .format(self.version_build, running_version_composed))

    def stop(self):
        """
        Stops a .NET listener on a remote windows machine via ansible and pulls logs.
        """

        # The package structure for LiteServ is different pre 1.4. Handle for this case
        if has_dot_net4_dot_5(self.version_build):
            binary_path = "couchbase-lite-net-msft-{}-liteserv/net45/LiteServ.exe".format(
                self.version_build)
        else:
            binary_path = "couchbase-lite-net-msft-{}-liteserv/LiteServ.exe".format(
                self.version_build)

        log_full_path = "{}/{}".format(os.getcwd(), self.logfile)

        log_info("Stopping {} on windows machine ...".format(binary_path))
        log_info("Pulling logs to {} ...".format(log_full_path))

        status = self.ansible_runner.run_ansible_playbook(
            "stop-liteserv-windows.yml",
            extra_vars={
                "binary_path": binary_path,
                "log_full_path": log_full_path
            })
        if status != 0:
            # Fixed: error message previously said "start" for a stop failure.
            raise LiteServError("Could not stop Liteserv")
def run_gateload_perf_test(gen_gateload_config, test_id, gateload_params, delay_profiling_secs, delay_expvar_collect_secs):
    """Run a gateload performance test against the cluster named by $CLUSTER_CONFIG.

    Installs telegraf, builds gateload, optionally generates gateload configs,
    starts profile collection and gateload, then logs expvars, pulls the Sync
    Gateway profile into a timestamped results directory, and shuts gateload
    down.

    gateload_params is assumed to expose number_pullers, number_pushers and
    runtime_ms attributes (based on usage here -- TODO confirm against the
    caller).  Exits the process (sys.exit(1)) if CLUSTER_CONFIG is unset;
    raises ProvisioningError / Exception / RuntimeError on the failure paths
    below.
    """

    # The cluster to target comes exclusively from the environment.
    try:
        cluster_config = os.environ["CLUSTER_CONFIG"]
    except KeyError:
        print ("Make sure CLUSTER_CONFIG is defined and pointing to the configuration you would like to provision")
        sys.exit(1)

    print("Running perf test against cluster: {}".format(cluster_config))
    ansible_runner = AnsibleRunner(cluster_config)

    # Install + configure telegraf
    status = ansible_runner.run_ansible_playbook("install-telegraf.yml")
    if status != 0:
        raise ProvisioningError("Failed to install telegraf")

    # Unique, timestamped id so successive runs never collide on disk.
    test_run_id = "{}_{}".format(test_id, time.strftime("%Y-%m-%d-%H-%M-%S"))

    # Create test results directory
    os.makedirs("testsuites/syncgateway/performance/results/{}".format(test_run_id))

    # Copy provisioning_config to performance_results/ folder
    shutil.copy("{}".format(cluster_config), "testsuites/syncgateway/performance/results/{}".format(test_run_id))

    # Pullers require a generated gateload config; reject inconsistent flags early.
    if int(gateload_params.number_pullers) > 0 and not gen_gateload_config:
        raise Exception("You specified --num-pullers but did not set --gen-gateload-config")

    # Build gateload
    print(">>> Building gateload")
    status = ansible_runner.run_ansible_playbook(
        "build-gateload.yml",
        extra_vars={},
    )
    assert status == 0, "Could not build gateload"

    # Generate gateload config
    print(">>> Generate gateload configs")
    if gen_gateload_config:
        generate_gateload_configs.main(
            cluster_config=cluster_config,
            test_id=test_run_id,
            gateload_params=gateload_params
        )

    print(">>> Starting profile collection scripts")
    # Profile collection runs for the whole scenario: convert ms -> s.
    runtime_s = int(gateload_params.runtime_ms) // 1000
    status = ansible_runner.run_ansible_playbook(
        "start-profile-collection.yml",
        extra_vars={
            "stats_run_time": runtime_s,
            "delay_profiling_secs": int(delay_profiling_secs)
        },
    )
    assert status == 0, "Could not start profiling collection scripts"

    # Start gateload
    print(">>> Starting gateload with {0} pullers and {1} pushers".format(
        gateload_params.number_pullers, gateload_params.number_pushers
    ))
    status = ansible_runner.run_ansible_playbook(
        "start-gateload.yml",
        extra_vars={
            "delay_expvar_collect_secs": int(delay_expvar_collect_secs)
        },
    )
    assert status == 0, "Could not start gateload"

    # write expvars to file, will exit when gateload scenario is done
    print(">>> Logging expvars")
    gateload_finished_successfully = log_expvars(cluster_config, test_run_id)

    print(">>> Fetch Sync Gateway profile")
    fetch_sync_gateway_profile(cluster_config, test_run_id)

    print(">>> Shutdown gateload")
    kill_gateload()

    if not gateload_finished_successfully:
        raise RuntimeError("It appears that gateload did not finish successfully.  Check logs for details")