def install_sync_gateway(cluster_config, sync_gateway_config):
    log_info(sync_gateway_config)

    if not sync_gateway_config.is_valid():
        raise ProvisioningError("Invalid sync_gateway provisioning configuration. Exiting ...")

    if sync_gateway_config.build_flags != "":
        log_warn("\n\n!!! WARNING: You are building with flags: {} !!!\n\n".format(sync_gateway_config.build_flags))

    ansible_runner = AnsibleRunner(cluster_config)
    config_path = os.path.abspath(sync_gateway_config.config_path)

    # Create buckets unless the user explicitly asked to skip this step
    if not sync_gateway_config.skip_bucketcreation:
        create_server_buckets(cluster_config, sync_gateway_config)

    # Install Sync Gateway via Source or Package
    if sync_gateway_config.commit is not None:
        # Install from source
        status = ansible_runner.run_ansible_playbook(
            "install-sync-gateway-source.yml",
            extra_vars={
                "sync_gateway_config_filepath": config_path,
                "commit": sync_gateway_config.commit,
                "build_flags": sync_gateway_config.build_flags
            }
        )
        if status != 0:
            raise ProvisioningError("Failed to install sync_gateway source")

    else:
        # Install from Package
        sync_gateway_base_url, sync_gateway_package_name, sg_accel_package_name = sync_gateway_config.sync_gateway_base_url_and_package()
        status = ansible_runner.run_ansible_playbook(
            "install-sync-gateway-package.yml",
            extra_vars={
                "couchbase_sync_gateway_package_base_url": sync_gateway_base_url,
                "couchbase_sync_gateway_package": sync_gateway_package_name,
                "couchbase_sg_accel_package": sg_accel_package_name,
                "sync_gateway_config_filepath": config_path
            }
        )
        if status != 0:
            raise ProvisioningError("Failed to install sync_gateway package")
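
A minimal, hedged usage sketch for the variant above. Everything here is a placeholder: the stand-in config class only mirrors the attributes read directly in this function (helpers such as create_server_buckets may need more), and the file paths are illustrative rather than paths confirmed by this listing.

from collections import namedtuple

# Hypothetical stand-in for the real sync_gateway_config object
_Fields = namedtuple(
    "_Fields", ["config_path", "commit", "build_flags", "skip_bucketcreation"])


class FakeSyncGatewayConfig(_Fields):

    def is_valid(self):
        return True

    def sync_gateway_base_url_and_package(self):
        # Placeholder build URL and package names
        return ("http://example.com/builds", "sg.rpm", "sg-accel.rpm")


sg_conf = FakeSyncGatewayConfig(
    config_path="resources/sync_gateway_configs/sync_gateway_default.json",
    commit=None,               # None -> install from a released package
    build_flags="",
    skip_bucketcreation=False)

install_sync_gateway("resources/cluster_configs/base_cc", sg_conf)

The second variant that follows extends this signature with edition and platform selection.
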
def install_sync_gateway(cluster_config,
                         sync_gateway_config,
                         sg_ce=False,
                         sg_platform="centos",
                         sa_platform="centos"):

    log_info(sync_gateway_config)

    if sync_gateway_config.build_flags != "":
        log_warn(
            "\n\n!!! WARNING: You are building with flags: {} !!!\n\n".format(
                sync_gateway_config.build_flags))

    ansible_runner = AnsibleRunner(cluster_config)
    config_path = os.path.abspath(sync_gateway_config.config_path)
    couchbase_server_primary_node = add_cbs_to_sg_config_server_field(
        cluster_config)
    # Create buckets unless the user explicitly asked to skip this step
    if not sync_gateway_config.skip_bucketcreation:
        create_server_buckets(cluster_config, sync_gateway_config)

    server_port = 8091
    server_scheme = "http"

    if is_cbs_ssl_enabled(cluster_config):
        server_port = 18091
        server_scheme = "https"

    # Shared vars
    playbook_vars = {
        "sync_gateway_config_filepath": config_path,
        "server_port": server_port,
        "server_scheme": server_scheme,
        "autoimport": "",
        "xattrs": "",
        "no_conflicts": "",
        "couchbase_server_primary_node": couchbase_server_primary_node
    }

    if is_xattrs_enabled(cluster_config):
        playbook_vars["autoimport"] = '"import_docs": "continuous",'
        playbook_vars["xattrs"] = '"enable_shared_bucket_access": true,'

    if no_conflicts_enabled(cluster_config):
        playbook_vars["no_conflicts"] = '"allow_conflicts": false,'

    try:
        revs_limit = get_revs_limit(cluster_config)
        playbook_vars["revs_limit"] = '"revs_limit": {},'.format(revs_limit)
    except KeyError as ex:
        log_info("KeyError when getting revs_limit: {}".format(ex))

    # Install Sync Gateway via Source or Package
    if sync_gateway_config.commit is not None:
        # Install from source
        playbook_vars["commit"] = sync_gateway_config.commit
        playbook_vars["build_flags"] = sync_gateway_config.build_flags

        status = ansible_runner.run_ansible_playbook(
            "install-sync-gateway-source.yml", extra_vars=playbook_vars)
        if status != 0:
            raise ProvisioningError("Failed to install sync_gateway source")

    else:
        # Install from Package
        sync_gateway_base_url, sync_gateway_package_name, sg_accel_package_name = sync_gateway_config.sync_gateway_base_url_and_package(
            sg_ce=sg_ce, sg_platform=sg_platform, sa_platform=sa_platform)

        playbook_vars[
            "couchbase_sync_gateway_package_base_url"] = sync_gateway_base_url
        playbook_vars[
            "couchbase_sync_gateway_package"] = sync_gateway_package_name
        playbook_vars["couchbase_sg_accel_package"] = sg_accel_package_name

        if sg_platform == "windows":
            status = ansible_runner.run_ansible_playbook(
                "install-sync-gateway-package-windows.yml",
                extra_vars=playbook_vars)
        else:
            status = ansible_runner.run_ansible_playbook(
                "install-sync-gateway-package.yml", extra_vars=playbook_vars)

        if status != 0:
            raise ProvisioningError("Failed to install sync_gateway package")

        if sa_platform == "windows":
            status = ansible_runner.run_ansible_playbook(
                "install-sg-accel-package-windows.yml",
                extra_vars=playbook_vars)
        else:
            status = ansible_runner.run_ansible_playbook(
                "install-sg-accel-package.yml", extra_vars=playbook_vars)
        if status != 0:
            raise ProvisioningError("Failed to install sg_accel package")

    # Configure aws cloudwatch logs forwarder
    status = ansible_runner.run_ansible_playbook(
        "configure-sync-gateway-awslogs-forwarder.yml", extra_vars={})
    if status != 0:
        raise ProvisioningError(
            "Failed to configure sync_gateway awslogs forwarder")
Example #3
def verify_changes(users, expected_num_docs, expected_num_revisions, expected_docs, ignore_rev_ids=False):

    # When users create or update a doc on sync_gateway, the response of the REST call
    # is stored in the user's cache. 'expected_docs' is a scenario-level dictionary created
    # from the combination of these user caches. It is used to build the expected results
    # when comparing against the changes feed for each user.

    errors = {
        "unexpected_changes_length": 0,
        "invalid_expected_docs_length": 0,
        "duplicate_expected_ids": 0,
        "duplicate_changes_doc_ids": 0,
        "expected_doc_ids_differ_from_changes_doc_ids": 0,
        "invalid_rev_id": 0,
        "unexpected_rev_id_prefix": 0,
        "unexpected_num_updates": 0
    }

    if type(users) is list:
        user_list = users
    else:
        # Allow a single user to be passed
        user_list = [users]

    if type(expected_docs) is not dict:
        log_error("expected_docs is not a dictionary")
        raise Exception("Make sure 'expected_docs' is a dictionary")

    for user in user_list:

        changes = user.get_changes(include_docs=True)
        results = changes["results"]

        changes_results = list()
        for result in results:
            changes_result = dict()
            if not result["id"].startswith("_user"):
                changes_result["id"] = result["doc"]["_id"]
                changes_result["rev"] = result["doc"]["_rev"]
                changes_result["updates"] = result["doc"]["updates"]
                changes_results.append(changes_result)

        # Check expected_num_docs matches number of changes results
        if expected_num_docs != len(changes_results):
            log_error("{0} -> {1} expected_num_docs != {2} len(changes_results)".format(user.name, expected_num_docs, len(changes_results)))
            errors["unexpected_changes_length"] += 1

        # Check that expected_num_docs matches the number of expected doc ids
        if expected_num_docs != len(expected_docs):
            log_error("{0} -> {1} expected_num_docs != {2} len(expected_docs)".format(user.name, expected_num_docs, len(expected_docs)))
            errors["invalid_expected_docs_length"] += 1

        # Get ids from expected docs
        expected_doc_ids = expected_docs.keys()

        # Assert there are no duplicates in expected doc ids
        if len(expected_doc_ids) != len(set(expected_doc_ids)):
            log_error("{0} -> Duplicates found in expected_doc_ids".format(user.name))
            errors["duplicate_expected_ids"] += 1

        # Get ids from all changes results
        changes_doc_ids = [result["id"] for result in changes_results]

        # Assert there are no duplicates in changes doc ids
        if len(changes_doc_ids) != len(set(changes_doc_ids)):
            log_error("{0} -> Duplicates found in changes doc ids".format(user.name))
            errors["duplicate_changes_doc_ids"] += 1

        # Assert the expected doc ids and changes doc ids are the same
        if set(expected_doc_ids) != set(changes_doc_ids):
            log_error("{0} -> changes feed doc ids differ from expected doc ids".format(user.name))
            different_docs = set(expected_doc_ids) - set(changes_doc_ids)
            log_error("{0} -> Set difference {1}".format(user.name, different_docs))
            errors["expected_doc_ids_differ_from_changes_doc_ids"] += 1

        if ignore_rev_ids:
            log_warn("WARNING: Ignoring rev id verification!!")

        for result in changes_results:
            if not ignore_rev_ids:
                # Compare revision number for id
                if expected_docs[result["id"]] != result["rev"]:
                    errors["invalid_rev_id"] += 1

            # IMPORTANT - This assumes that no conflicts are created via new_edits in the doc PUT
            # Assert that the revision id prefix matches the number of expected revisions
            rev_id_prefix = result["rev"].split("-")[0]

            # The rev-id prefix is 1 when the document is created and is
            # incremented by one for each non-conflicting update
            if expected_num_revisions != int(rev_id_prefix) - 1:
                log_error("{0} -> expected_num_revisions {1} does not match stored rev_id_prefix: {2}".format(user.name, expected_num_revisions, rev_id_prefix))
                errors["unexpected_rev_id_prefix"] += 1

            # Check that the expected number of updates matches the updates field on the _changes doc
            if expected_num_revisions != result["updates"]:
                log_error("{0} -> expected_num_revisions {1} does not match number of updates {2}".format(user.name, expected_num_revisions, result["updates"]))
                errors["unexpected_num_updates"] += 1

        # Allow printing updates even if changes feed length is 0
        if len(changes_results) == 0:
            updates = 0
        else:
            updates = changes_results[0]["updates"]

        log_info(" -> |{0}| expected (num_docs: {1} num_revisions: {2}) _changes (num_docs: {3} updates: {4})".format(
            user.name,
            expected_num_docs,
            expected_num_revisions,
            len(changes_doc_ids),
            updates
        ))

        # Print any errors that may have occurred
        error_count = 0
        for key, val in errors.items():
            if val != 0:
                log_error("<!> VERIFY ERROR - name: {}: occurences: {}".format(key, val))
                error_count += 1

        assert error_count == 0
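
A self-contained sketch of how `verify_changes` might be exercised. The `MockUser` class below is purely illustrative (it is not part of the framework this listing comes from); it fabricates a _changes feed that agrees with its own cache, so the assertions pass without a running Sync Gateway.

# Hypothetical mock user whose _changes feed matches its cached {doc_id: rev_id} map
class MockUser(object):

    def __init__(self, name, num_docs, num_updates):
        self.name = name
        self.num_updates = num_updates
        # cache maps doc_id -> latest rev id (generation = 1 create + num_updates)
        self.cache = {"doc_{}".format(i): "{}-rev".format(num_updates + 1)
                      for i in range(num_docs)}

    def get_changes(self, include_docs=False):
        results = [{"id": doc_id,
                    "doc": {"_id": doc_id,
                            "_rev": rev,
                            "updates": self.num_updates}}
                   for doc_id, rev in self.cache.items()]
        return {"results": results}


seth = MockUser("seth", num_docs=100, num_updates=1)
verify_changes(seth,
               expected_num_docs=100,
               expected_num_revisions=1,   # one update after the initial create
               expected_docs=seth.cache)
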
Example #4
def write_config(config, pool_file, use_docker, sg_windows, sg_accel_windows):

    connection_string = ""
    if use_docker:
        connection_string = "ansible_connection=docker"

    ips, ip_to_node_type = get_hosts(pool_file)
    ip_to_node_type_len = len(ip_to_node_type)
    ip_to_node_type_defined = False

    resource_folder = os.path.dirname(pool_file)

    log_info("ips: {}".format(ips))

    if len(ips) < config.num_machines_required():
        log_warn(
            "WARNING: Skipping config {} since {} machines required, but only {} provided"
            .format(config.name, config.num_machines_required(), len(ips)))
        return

    if ip_to_node_type_len > 0:
        ip_to_node_type_defined = True

    # Check for number of IPs versus number of IPs in ip_to_node_type
    if ip_to_node_type and len(ip_to_node_type) != len(ips):
        raise Exception(
            "Number of IPs in resources/pool:ips and ip_to_node_type do not match. Exiting ..."
        )

    log_info("\nGenerating config: {}".format(config.name))

    ansible_cluster_conf_file = resource_folder + "/cluster_configs/{}".format(
        config.name)
    cluster_json_file = resource_folder + "/cluster_configs/{}.json".format(
        config.name)

    with open(ansible_cluster_conf_file, "w") as f:

        hosts = []
        couchbase_servers = []
        sync_gateways = []
        accels = []
        load_generators = []
        load_balancers = []

        f.write("[pool]\n")
        count = 1
        for ip in ips:
            f.write("ma{} ansible_host={} {}\n".format(count, ip,
                                                       connection_string))
            hosts.append({"name": "host{}".format(count), "ip": ip})
            count += 1

        f.write("\n")
        f.write("\n")

        # Write Servers
        cbs_ips_to_remove = []
        f.write("[couchbase_servers]\n")
        for i in range(config.num_cbs):
            # Check if the IP is present in the ip_to_node_type

            j = 0
            found = False
            while ip_to_node_type_defined and j < len(ips):
                if ips[j] not in ip_to_node_type:
                    raise Exception("{} not in ip_to_node_type".format(ips[j]))

                if ip_to_node_type[ips[j]] != "couchbase_servers" or ips[
                        j] in cbs_ips_to_remove:
                    # Skip: the IP is not a couchbase_server, or it has already been recorded
                    j += 1
                    continue
                else:
                    found = True
                    break

            # Check that the number of couchbase_servers in ip_to_node_type matches the config
            if ip_to_node_type_defined and not found:
                log_warn(
                    "WARNING: Skipping config {} since {} couchbase_servers required, but only {} provided"
                    .format(config.name, config.num_cbs,
                            len(cbs_ips_to_remove)))

                # The config file may be partially generated (e.g. valid sg entries but invalid cbs entries)
                log_warn("WARNING: Removing the partially generated config {}".
                         format(config.name))
                os.unlink(f.name)

                return

            # j is the counter for ip_to_node_type which is invalid if not defined
            if ip_to_node_type_defined:
                ip = ips[j]
            else:
                ip = ips[i]

            f.write("cb{} ansible_host={} {}\n".format(i + 1, ip,
                                                       connection_string))
            couchbase_servers.append({"name": "cb{}".format(i + 1), "ip": ip})
            cbs_ips_to_remove.append(ip)

        for cbs_ip in cbs_ips_to_remove:
            ips.remove(cbs_ip)

        f.write("\n")

        # Write sync_gateways
        f.write("[sync_gateways]\n")
        sg_ips_to_remove = []
        for i in range(config.num_sgs):
            # Check if the IP is present in the ip_to_node_type
            j = 0
            found = False

            while ip_to_node_type_defined and j < len(ips):
                if ips[j] not in ip_to_node_type:
                    raise Exception("{} not in ip_to_node_type".format(ips[j]))

                if ip_to_node_type[ips[j]] != "sync_gateways" or ips[
                        j] in sg_ips_to_remove:
                    # Skip: the IP is not a sync_gateway, or it has already been recorded
                    j += 1
                    continue
                else:
                    found = True
                    break

            # Check that the number of sync_gateways in ip_to_node_type matches the config
            if ip_to_node_type_defined and not found:
                log_warn(
                    "WARNING: Skipping config {} since {} sync_gateways required, but only {} provided"
                    .format(config.name, config.num_sgs,
                            len(sg_ips_to_remove)))

                # The config file may be partially generated (e.g. valid cbs entries but invalid sg entries)
                log_warn("WARNING: Removing the partially generated config {}".
                         format(config.name))
                os.unlink(f.name)

                return

            # j is the counter for ip_to_node_type which is invalid if not defined
            if ip_to_node_type_defined:
                ip = ips[j]
            else:
                ip = ips[i]

            f.write("sg{} ansible_host={} {}\n".format(i + 1, ip,
                                                       connection_string))
            sync_gateways.append({"name": "sg{}".format(i + 1), "ip": ip})
            sg_ips_to_remove.append(ip)

        for sg_ip in sg_ips_to_remove:
            print "REMOVING {} and {} from {}".format(sg_ip, sg_ips_to_remove,
                                                      ips)
            ips.remove(sg_ip)

        f.write("\n")

        # Write sg_accels
        ac_ips_to_remove = []
        f.write("[sg_accels]\n")
        for i in range(config.num_acs):
            # Check if the IP is present in the ip_to_node_type
            j = 0
            found = False

            while ip_to_node_type_defined and j < len(ips):
                if ips[j] not in ip_to_node_type:
                    raise Exception("{} not in ip_to_node_type".format(ips[j]))

                if ip_to_node_type[
                        ips[j]] != "sg_accels" or ips[j] in ac_ips_to_remove:
                    # Skip: the IP is not an sg_accel, or it has already been recorded
                    j += 1
                    continue
                else:
                    found = True
                    break

            # Check that the number of sg_accels in ip_to_node_type matches the config
            if ip_to_node_type_defined and not found:
                log_warn(
                    "WARNING: Skipping config {} since {} sg_accels required, but only {} provided"
                    .format(config.name, config.num_acs,
                            len(ac_ips_to_remove)))

                # The config file may be partially generated (e.g. valid cbs entries but invalid ac entries)
                log_warn("WARNING: Removing the partially generated config {}".
                         format(config.name))
                os.unlink(f.name)

                return

            # j is the counter for ip_to_node_type which is invalid if not defined
            if ip_to_node_type_defined:
                ip = ips[j]
            else:
                ip = ips[i]

            f.write("ac{} ansible_host={} {}\n".format(i + 1, ip,
                                                       connection_string))
            accels.append({"name": "ac{}".format(i + 1), "ip": ip})
            ac_ips_to_remove.append(ip)

        for ac_ip in ac_ips_to_remove:
            ips.remove(ac_ip)

        f.write("\n")

        # Write load generators
        lg_ips_to_remove = []
        f.write("[load_generators]\n")
        for i in range(config.num_lgs):
            # Check if the IP is present in the ip_to_node_type
            j = 0
            found = False
            while ip_to_node_type_defined and j < len(ips):
                if ips[j] not in ip_to_node_type:
                    raise Exception("{} not in ip_to_node_type".format(ips[j]))

                if ip_to_node_type[ips[j]] != "load_generators" or ips[
                        j] in lg_ips_to_remove:
                    # Skip: the IP is not a load_generator, or it has already been recorded
                    j += 1
                    continue
                else:
                    found = True
                    break

            # Check that the number of load_generators in ip_to_node_type matches the config
            if ip_to_node_type_defined and not found:
                log_warn(
                    "WARNING: Skipping config {} since {} load_generators required, but only {} provided"
                    .format(config.name, config.num_lgs,
                            len(lg_ips_to_remove)))

                # The config file may be partially generated (e.g. valid cbs entries but invalid lg entries)
                log_warn("WARNING: Removing the partially generated config {}".
                         format(config.name))
                os.unlink(f.name)

                return

            # j is the counter for ip_to_node_type which is invalid if not defined
            if ip_to_node_type_defined:
                ip = ips[j]
            else:
                ip = ips[i]

            f.write("lg{} ansible_host={} {}\n".format(i + 1, ip,
                                                       connection_string))
            load_generators.append({"name": "lg{}".format(i + 1), "ip": ip})
            lg_ips_to_remove.append(ip)

        for lg_ip in lg_ips_to_remove:
            ips.remove(lg_ip)

        f.write("\n")

        # Write load balancers
        lb_ips_to_remove = []
        f.write("[load_balancers]\n")
        for i in range(config.num_lbs):
            # Check if the IP is present in the ip_to_node_type
            j = 0
            found = False
            while ip_to_node_type_defined and j < len(ips):
                if ips[j] not in ip_to_node_type:
                    raise Exception("{} not in ip_to_node_type".format(ips[j]))

                if ip_to_node_type[ips[j]] != "load_balancers" or ips[
                        j] in lb_ips_to_remove:
                    # Skip: the IP is not a load_balancer, or it has already been recorded
                    j += 1
                    continue
                else:
                    found = True
                    break

            # Check that the number of load_balancers in ip_to_node_type matches the config
            if ip_to_node_type_defined and not found:
                log_warn(
                    "WARNING: Skipping config {} since {} load_balancers required, but only {} provided"
                    .format(config.name, config.num_lbs,
                            len(lb_ips_to_remove)))

                # The config file may be partially generated (e.g. valid cbs entries but invalid lb entries)
                log_warn("WARNING: Removing the partially generated config {}".
                         format(config.name))
                os.unlink(f.name)

                return

            # j is the counter for ip_to_node_type which is invalid if not defined
            if ip_to_node_type_defined:
                ip = ips[j]
            else:
                ip = ips[i]

            f.write("lb{} ansible_host={} {}\n".format(i + 1, ip,
                                                       connection_string))
            load_balancers.append({"name": "lb{}".format(i + 1), "ip": ip})
            lb_ips_to_remove.append(ip)

        for lb_ip in lb_ips_to_remove:
            ips.remove(lb_ip)

        f.write("\n")

        # Get the local address to run the webhook server on
        # TODO: make the webhook receiver its own endpoint, or come up with a better design.
        try:
            f.write("[webhook_ip]\n")
            # HACK: http://stackoverflow.com/questions/166506/finding-local-ip-addresses-using-pythons-stdlib
            # Connect to Google's public DNS server and read the socket name tuple (<local_ip_address>, <port>).
            # The 'local_ip_address' is the IP of this machine on the LAN. It is used to run a mock server
            # for the webhook tests and is exposed on the LAN so that other machines can connect to it.
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            s.connect(("8.8.8.8", 80))
            local_ip = s.getsockname()[0]
            s.close()

            log_info("webhook ip: {}".format(local_ip))
            f.write("tf1 ansible_host={} {}".format(local_ip,
                                                    connection_string))
        except Exception as e:
            log_error(
                "Failed to find local_ip, webhook tests will fail.  Error: {}".
                format(e))

        f.write("\n\n[environment]\n")
        f.write("cbs_ssl_enabled=False\n")
        f.write("xattrs_enabled=False\n")
        f.write("sg_lb_enabled=False\n")

        if sg_windows:
            f.write("\n\n[sync_gateways:vars]\n")
            f.write("ansible_user=FakeUser\n")
            f.write("ansible_password=FakePassword\n")
            f.write("ansible_port=5986\n")
            f.write("ansible_connection=winrm\n")
            f.write("ansible_winrm_server_cert_validation=ignore\n")

        if sg_accel_windows:
            f.write("\n\n[sg_accels:vars]\n")
            f.write("ansible_user=FakeUser\n")
            f.write("ansible_password=FakePassword\n")
            f.write("ansible_port=5986\n")
            f.write("ansible_connection=winrm\n")
            f.write("ansible_winrm_server_cert_validation=ignore\n")

        log_info("Generating {}.json".format(config.name))

        # Write json file consumable by testkit.cluster class
        cluster_dict = {
            "hosts": hosts,
            "couchbase_servers": couchbase_servers,
            "sync_gateways": sync_gateways,
            "sg_accels": accels,
            "load_generators": load_generators,
            "load_balancers": load_balancers,
            "environment": {
                "cbs_ssl_enabled": False,
                "xattrs_enabled": False,
                "sg_lb_enabled": False
            }
        }

        with open(cluster_json_file, "w") as f_json:
            f_json.write(json.dumps(cluster_dict, indent=4))
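
A hedged driver sketch for the inventory writer above. `ClusterDef` is a stand-in that mirrors only the attributes this function reads (name, the node counts, and num_machines_required()); the topology name and pool file path are placeholders, and the pool file must exist in whatever format get_hosts expects.

# Hypothetical topology definition used only for illustration
class ClusterDef(object):

    def __init__(self, name, num_cbs, num_sgs, num_acs, num_lgs, num_lbs):
        self.name = name
        self.num_cbs = num_cbs
        self.num_sgs = num_sgs
        self.num_acs = num_acs
        self.num_lgs = num_lgs
        self.num_lbs = num_lbs

    def num_machines_required(self):
        return (self.num_cbs + self.num_sgs + self.num_acs +
                self.num_lgs + self.num_lbs)


topology = ClusterDef("base_cc", num_cbs=1, num_sgs=1, num_acs=0,
                      num_lgs=0, num_lbs=0)
write_config(topology, "resources/pool.json",    # placeholder pool file
             use_docker=False, sg_windows=False, sg_accel_windows=False)

The simpler variant that follows takes only the config and pool file and writes hosts without connection strings or a node-type mapping.
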
def write_config(config, pool_file):
    ips = get_ips(pool_file)
    log_info("ips: {}".format(ips))

    if len(ips) < config.num_machines_required():
        log_warn("WARNING: Skipping config {} since {} machines required, but only {} provided".format(
            config.name,
            config.num_machines_required(),
            len(ips))
        )
        return

    log_info("\nGenerating config: {}".format(config.name))

    ansible_cluster_conf_file = "resources/cluster_configs/{}".format(config.name)
    cluster_json_file = "resources/cluster_configs/{}.json".format(config.name)

    with open(ansible_cluster_conf_file, "w") as f:

        hosts = []
        couchbase_servers = []
        sync_gateways = []
        accels = []
        load_generators = []
        load_balancers = []

        f.write("[pool]\n")
        count = 1
        for ip in ips:
            f.write("ma{} ansible_host={}\n".format(count, ip))
            hosts.append({
                "name": "host{}".format(count),
                "ip": ip
            })
            count += 1

        f.write("\n")
        f.write("\n")

        # Write Servers
        cbs_ips_to_remove = []
        f.write("[couchbase_servers]\n")
        for i in range(config.num_cbs):
            ip = ips[i]
            f.write("cb{} ansible_host={}\n".format(i + 1, ip))
            couchbase_servers.append({
                "name": "cb{}".format(i + 1),
                "ip": ip
            })
            cbs_ips_to_remove.append(ip)

        for cbs_ip in cbs_ips_to_remove:
            ips.remove(cbs_ip)

        f.write("\n")

        # Write sync_gateways
        f.write("[sync_gateways]\n")
        sg_ips_to_remove = []
        for i in range(config.num_sgs):
            ip = ips[i]
            f.write("sg{} ansible_host={}\n".format(i + 1, ip))
            sync_gateways.append({
                "name": "sg{}".format(i + 1),
                "ip": ip
            })
            sg_ips_to_remove.append(ip)

        for sg_ip in sg_ips_to_remove:
            ips.remove(sg_ip)

        f.write("\n")

        # Write sg_accels
        ac_ips_to_remove = []
        f.write("[sg_accels]\n")
        for i in range(config.num_acs):
            ip = ips[i]
            f.write("ac{} ansible_host={}\n".format(i + 1, ip))
            accels.append({
                "name": "ac{}".format(i + 1),
                "ip": ip
            })
            ac_ips_to_remove.append(ip)

        for ac_ip in ac_ips_to_remove:
            ips.remove(ac_ip)

        f.write("\n")

        # Write load generators
        lg_ips_to_remove = []
        f.write("[load_generators]\n")
        for i in range(config.num_lgs):
            ip = ips[i]
            f.write("lg{} ansible_host={}\n".format(i + 1, ip))
            load_generators.append({
                "name": "lg{}".format(i + 1),
                "ip": ip
            })
            lg_ips_to_remove.append(ip)

        for lg_ip in lg_ips_to_remove:
            ips.remove(lg_ip)

        f.write("\n")

        # Write load balancers
        lb_ips_to_remove = []
        f.write("[load_balancers]\n")
        for i in range(config.num_lbs):
            ip = ips[i]
            f.write("lb{} ansible_host={}\n".format(i + 1, ip))
            load_balancers.append({
                "name": "lb{}".format(i + 1),
                "ip": ip
            })
            lb_ips_to_remove.append(ip)

        for lb_ip in lb_ips_to_remove:
            ips.remove(lb_ip)

        f.write("\n")

        # Get the local address to run the webhook server on
        # TODO: make the webhook receiver its own endpoint, or come up with a better design.
        try:
            f.write("[webhook_ip]\n")
            # HACK: http://stackoverflow.com/questions/166506/finding-local-ip-addresses-using-pythons-stdlib
            # Connect to Google's public DNS server and read the socket name tuple (<local_ip_address>, <port>).
            # The 'local_ip_address' is the IP of this machine on the LAN. It is used to run a mock server
            # for the webhook tests and is exposed on the LAN so that other machines can connect to it.
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            s.connect(("8.8.8.8", 80))
            local_ip = s.getsockname()[0]
            s.close()

            log_info("webhook ip: {}".format(local_ip))
            f.write("tf1 ansible_host={}".format(local_ip))
        except Exception as e:
            log_error("Failed to find local_ip, webhook tests will fail.  Error: {}".format(e))

        log_info("Generating {}.json".format(config.name))

        # Write json file consumable by testkit.cluster class
        cluster_dict = {
            "hosts": hosts,
            "couchbase_servers": couchbase_servers,
            "sync_gateways": sync_gateways,
            "sg_accels": accels,
            "load_generators": load_generators,
            "load_balancers": load_balancers
        }

        with open(cluster_json_file, "w") as f_json:
            f_json.write(json.dumps(cluster_dict, indent=4))
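
A short batch-driver sketch for the simpler variant above, reusing the hypothetical `ClusterDef` stand-in from the previous sketch; the topology names and counts are illustrative only.

topologies = [
    ClusterDef("1sg_1cbs", num_cbs=1, num_sgs=1, num_acs=0,
               num_lgs=0, num_lbs=0),
    ClusterDef("2sg_1ac_1cbs", num_cbs=1, num_sgs=2, num_acs=1,
               num_lgs=0, num_lbs=0),
]
for topology in topologies:
    write_config(topology, "resources/pool.json")   # placeholder pool file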