Example #1
def scan_for_errors(error_strings, log_file_path):
    """
    Scans a log file line by line for any of the provided strings.
    We use this to look for errors, so we expect that none of the strings will be found.
    If any of the strings are found, we raise an exception.

    'error_strings' should be a list, e.g. ['panic', 'error']
    """

    if not isinstance(error_strings, list):
        raise ValueError("'error_strings' must be a list")

    log_info("Looking for {} in {} ...".format(error_strings, log_file_path))

    # Scan each line in the log file for the words to search for
    with open(log_file_path) as f:
        for line in f:
            for word in error_strings:
                # Compare the word and the line in lowercase so that, for example,
                # 'warning' also matches 'WARNING', 'Warning', etc.
                if word.lower() in line.lower():
                    log_error(line)
                    raise AssertionError("'{}' found! Please review: {}".format(word, log_file_path))

    # No errors found
    log_info("Scan complete. Did not find any error strings.")
def get_ips(pool_file="resources/pool.json"):
    with open(pool_file) as f:
        pool_dict = json.loads(f.read())
        ips = pool_dict["ips"]

    # Make sure there are no duplicate endpoints
    if len(ips) != len(set(ips)):
        log_error("Duplicate endpoints found in 'resources/pools'. Make sure they are unique. Exiting ...")
        sys.exit(1)

    return ips
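get_ips only needs an 'ips' array in the pool file. A hedged sketch of a minimal resources/pool.json, written from Python so the shape is explicit (the addresses are placeholders and the resources/ directory is assumed to exist):

import json

# Illustrative pool file: only the "ips" key is required by get_ips.
sample_pool = {"ips": ["192.168.33.10", "192.168.33.11", "192.168.33.12"]}

with open("resources/pool.json", "w") as f:
    json.dump(sample_pool, f, indent=4)

# With the file above in place, get_ips() would return the three addresses.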
Example #3
def get_hosts(pool_file="resources/pool.json"):
    with open(pool_file) as f:
        pool_dict = json.loads(f.read())
        ips = pool_dict["ips"]
        ip_to_node_type = {}

        if "ip_to_node_type" in pool_dict:
            ip_to_node_type = pool_dict["ip_to_node_type"]

    # Make sure there are no duplicate endpoints
    if len(ips) != len(set(ips)):
        log_error(
            "Duplicate endpoints found in '{}'. Make sure they are unique. Exiting ...".format(pool_file)
        )
        sys.exit(1)

    return ips, ip_to_node_type
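get_hosts also honors an optional 'ip_to_node_type' mapping. A sketch of the expected shape, with placeholder addresses and the role names that write_config later matches against:

# Illustrative only: every key of "ip_to_node_type" should be one of the entries in "ips".
# The role values are the ones write_config checks for:
# "couchbase_servers", "sync_gateways", "sg_accels", "load_generators", "load_balancers".
sample_pool = {
    "ips": ["192.168.33.10", "192.168.33.11", "192.168.33.12"],
    "ip_to_node_type": {
        "192.168.33.10": "couchbase_servers",
        "192.168.33.11": "sync_gateways",
        "192.168.33.12": "load_generators"
    }
}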
    def _verify_stopped(self):
        """Polls until the server url is unreachable"""

        start = time.time()
        while True:
            if time.time() - start > keywords.constants.CLIENT_REQUEST_TIMEOUT:
                raise TimeoutError("Waiting for server to be unreachable but it never was!")
            try:
                resp = self._session.get("{}/pools".format(self.url))
                log_r(resp)
                resp.raise_for_status()
            except ConnectionError:
                # This is expected and used to determine if a server node has gone offline
                break

            except HTTPError as e:
                # 500 errors may happen as a result of the node going down
                log_error(e)
                continue

            time.sleep(1)
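The except clauses above only behave as described if ConnectionError and HTTPError are the requests exception classes; on Python 3 the builtin ConnectionError is a different class and would not catch a refused connection raised by requests. A sketch of the imports this method appears to rely on (TimeoutError and keywords.constants come from the surrounding test framework and are assumptions here):

import time

from requests.exceptions import ConnectionError, HTTPError
# requests.exceptions.ConnectionError shadows the builtin name, which is what
# lets the 'except ConnectionError' branch detect the node going offline.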
Example #5
    def _verify_stopped(self):
        """Polls until the server url is unreachable"""

        start = time.time()
        while True:
            if time.time() - start > keywords.constants.CLIENT_REQUEST_TIMEOUT:
                raise TimeoutError(
                    "Timed out waiting for the server to become unreachable")
            try:
                resp = self._session.get("{}/pools".format(self.url))
                log_r(resp)
                resp.raise_for_status()
            except ConnectionError:
                # This is expected and used to determine if a server node has gone offline
                break

            except HTTPError as e:
                # 500 errors may happen as a result of the node going down
                log_error(e)
                continue

            time.sleep(1)
def test_mulitple_users_mulitiple_channels_mulitple_revisions(
    params_from_base_test_setup, sg_conf_name, num_users, num_channels, num_docs, num_revisions
):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'mulitple_users_mulitiple_channels_mulitple_revisions'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_users: {}".format(num_users))
    log_info("num_channels: {}".format(num_channels))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    start = time.time()

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    init_completed = time.time()
    log_info("Initialization completed. Time taken:{}s".format(init_completed - start))

    channels = ["channel-" + str(i) for i in range(num_channels)]
    password = "******"

    sgs = cluster.sync_gateways

    admin = Admin(sgs[0])

    # Register User
    log_info("Register User")
    user_objects = admin.register_bulk_users(
        target=sgs[0], db="db", name_prefix="User", number=num_users, password=password, channels=channels
    )

    # Add docs
    log_info("Add docs")
    in_parallel(user_objects, "add_docs", num_docs)

    # Update docs
    log_info("Update docs")
    in_parallel(user_objects, "update_docs", num_revisions)

    # Add a sleep to let Sync Gateway catch up.
    # Without it, this test fails in Channel-Cache mode because the changes feed
    # does not return the expected num_revisions for the docs.
    # The test passes in Distributed-Index mode.
    time.sleep(10)

    # Get changes for all users
    in_parallel(user_objects, "get_changes")

    # Every user should have the same number of docs
    # total/expected docs = num_users * num_docs
    received_docs = in_parallel(user_objects, "get_num_docs")

    expected_docs = num_users * num_docs
    for user_obj, docs in received_docs.items():
        log_info("User {} got {} docs, expected docs: {}".format(user_obj.name, docs, expected_docs))
        assert docs == expected_docs

    # Verify that
    # user created doc-ids exist in docs received in changes feed
    # expected revision is equal to received revision
    expected_revision = str(num_revisions + 1)
    docs_rev_dict = in_parallel(user_objects, "get_num_revisions")
    rev_errors = []
    for user_obj, docs_revision_dict in docs_rev_dict.items():
        for doc_id in docs_revision_dict.keys():
            rev = docs_revision_dict[doc_id]
            log_info(
                "User {} doc_id {} has {} revisions, expected revision: {}".format(
                    user_obj.name, doc_id, rev, expected_revision
                )
            )
            if rev != expected_revision:
                rev_errors.append(doc_id)
                log_error(
                    "User {} doc_id {} got revision {}, expected revision {}".format(
                        user_obj.name, doc_id, rev, expected_revision
                    )
                )

    assert len(rev_errors) == 0

    # Verify each User created docs are part of changes feed
    output = in_parallel(user_objects, "check_doc_ids_in_changes_feed")
    assert True in output.values()

    end = time.time()
    log_info("Test ended.")
    log_info("Main test duration: {}".format(end - init_completed))
    log_info("Test setup time: {}".format(init_completed - start))
    log_info("Total Time taken: {}s".format(end - start))
def test_bucket_online_offline_resync_with_offline(params_from_base_test_setup, sg_conf_name, num_users, num_docs, num_revisions):
    start = time.time()

    cluster_conf = params_from_base_test_setup["cluster_config"]
    test_mode = params_from_base_test_setup["mode"]

    if test_mode == "di":
        pytest.skip("Unsupported feature in distributed index")

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, test_mode)

    log_info("Running 'test_bucket_online_offline_resync_with_online'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using num_users: {}".format(num_users))
    log_info("Using num_docs: {}".format(num_docs))
    log_info("Using num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_conf)

    init_completed = time.time()
    log_info("Initialization completed. Time taken:{}s".format(init_completed - start))

    num_channels = 1
    channels = ["channel-" + str(i) for i in range(num_channels)]
    password = "******"

    sgs = cluster.sync_gateways

    admin = Admin(sgs[0])

    # Register User
    log_info("Register User")
    user_objects = admin.register_bulk_users(target=sgs[0], db="db", name_prefix="User",
                                             number=num_users, password=password, channels=channels)
    user_x = admin.register_user(target=sgs[0], db="db", name="User-X", password="******", channels=["channel_x"])

    # Add docs
    log_info("Add docs")
    in_parallel(user_objects, 'add_docs', num_docs)

    # Update docs
    log_info("Update docs")
    in_parallel(user_objects, 'update_docs', num_revisions)

    time.sleep(10)

    # Get changes for all users
    in_parallel(user_objects, 'get_changes')

    # Every user should have the same number of docs
    # total/expected docs = num_users * num_docs
    received_docs = in_parallel(user_objects, 'get_num_docs')

    expected_docs = num_users * num_docs
    for user_obj, docs in received_docs.items():
        log_info('User {} got {} docs, expected docs: {}'.format(user_obj.name, docs, expected_docs))
        assert docs == expected_docs

    # Verify that
    # user created doc-ids exist in docs received in changes feed
    # expected revision is equal to received revision
    expected_revision = str(num_revisions + 1)
    docs_rev_dict = in_parallel(user_objects, 'get_num_revisions')
    rev_errors = []
    for user_obj, docs_revision_dict in docs_rev_dict.items():
        for doc_id in docs_revision_dict.keys():
            rev = docs_revision_dict[doc_id]
            log_info('User {} doc_id {} has {} revisions, expected revision: {}'.format(
                user_obj.name,
                doc_id,
                rev,
                expected_revision
            ))
            if rev != expected_revision:
                rev_errors.append(doc_id)
                log_error('User {} doc_id {} got revision {}, expected revision {}'.format(
                    user_obj.name,
                    doc_id,
                    rev,
                    expected_revision
                ))

    assert len(rev_errors) == 0

    # Verify each User created docs are part of changes feed
    output = in_parallel(user_objects, 'check_doc_ids_in_changes_feed')
    assert True in output.values()

    # Take "db" offline
    status = admin.take_db_offline(db="db")
    assert status == 200

    sg_restart_config = sync_gateway_config_path_for_mode("bucket_online_offline/db_online_offline_access_restricted", test_mode)
    restart_status = cluster.sync_gateways[0].restart(sg_restart_config)
    assert restart_status == 0

    log_info("Sleeping....")
    time.sleep(10)
    pool = ThreadPool(processes=1)

    log_info("Restarted SG....")
    time.sleep(5)

    db_info = admin.get_db_info("db")
    log_info("Status of db = {}".format(db_info["state"]))
    assert db_info["state"] == "Offline"

    try:
        async_resync_result = pool.apply_async(admin.db_resync, ("db",))
        log_info("resync issued !!!!!!")
    except Exception as e:
        log_info("Catch resync exception: {}".format(e))

    time.sleep(1)
    resync_occurred = False

    for i in range(20):
        db_info = admin.get_db_info("db")
        log_info("Status of db = {}".format(db_info["state"]))
        if db_info["state"] == "Resyncing":
            resync_occurred = True
            log_info("Resync occurred")
            try:
                status = admin.get_db_info(db="db")
                log_info("Got db_info request status: {}".format(status))
            except HTTPError as e:
                log_info("status = {} exception = {}".format(status, e.response.status_code))
                assert False
            else:
                log_info("Got 200 ok for supported operation")

        time.sleep(1)
        if resync_occurred:
            break

    time.sleep(10)

    status = admin.bring_db_online(db="db")
    log_info("online request issued !!!!! response status: {}".format(status))

    time.sleep(5)
    db_info = admin.get_db_info("db")
    log_info("Status of db = {}".format(db_info["state"]))
    assert db_info["state"] == "Online"

    resync_result = async_resync_result.get()
    log_info("resync_changes {}".format(resync_result))
    log_info("expecting num_changes  == num_docs {} * num_users {}".format(num_docs, num_users))
    assert resync_result['payload']['changes'] == num_docs * num_users
    assert resync_result['status_code'] == 200

    time.sleep(5)
    global_cache = list()
    for user in user_objects:
        global_cache.append(user.cache)

    all_docs = {k: v for user_cache in global_cache for k, v in user_cache.items()}

    verify_changes(user_x, expected_num_docs=expected_docs, expected_num_revisions=num_revisions, expected_docs=all_docs)

    end = time.time()
    log_info("Test ended.")
    log_info("Main test duration: {}".format(end - init_completed))
    log_info("Test setup time: {}".format(init_completed - start))
    log_info("Total Time taken: {}s".format(end - start))
def test_bucket_online_offline_resync_sanity(params_from_base_test_setup, sg_conf_name, num_users, num_docs, num_revisions):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    test_mode = params_from_base_test_setup["mode"]

    if test_mode == "di":
        pytest.skip("Unsupported feature in distributed index")

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, test_mode)

    log_info("Running 'test_bucket_online_offline_resync_sanity'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using num_users: {}".format(num_users))
    log_info("Using num_docs: {}".format(num_docs))
    log_info("Using num_revisions: {}".format(num_revisions))

    start = time.time()

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_conf)

    init_completed = time.time()
    log_info("Initialization completed. Time taken:{}s".format(init_completed - start))

    num_channels = 1
    channels = ["channel-" + str(i) for i in range(num_channels)]
    password = "******"

    sgs = cluster.sync_gateways

    admin = Admin(sgs[0])

    # Register User
    log_info("Register User")
    user_objects = admin.register_bulk_users(target=sgs[0], db="db", name_prefix="User",
                                             number=num_users, password=password, channels=channels)
    user_x = admin.register_user(target=sgs[0], db="db", name="User-X", password="******", channels=["channel_x"])

    # Add docs
    log_info("Add docs")
    in_parallel(user_objects, 'add_docs', num_docs)

    # Update docs
    log_info("Update docs")
    in_parallel(user_objects, 'update_docs', num_revisions)

    time.sleep(10)

    # Get changes for all users
    in_parallel(user_objects, 'get_changes')

    # Every user should have the same number of docs
    # total/expected docs = num_users * num_docs
    received_docs = in_parallel(user_objects, 'get_num_docs')

    expected_docs = num_users * num_docs
    for user_obj, docs in received_docs.items():
        log_info('User {} got {} docs, expected docs: {}'.format(user_obj.name, docs, expected_docs))
        assert docs == expected_docs

    # Verify that
    # user created doc-ids exist in docs received in changes feed
    # expected revision is equal to received revision
    expected_revision = str(num_revisions + 1)
    docs_rev_dict = in_parallel(user_objects, 'get_num_revisions')
    rev_errors = []
    for user_obj, docs_revision_dict in docs_rev_dict.items():
        for doc_id in docs_revision_dict.keys():
            rev = docs_revision_dict[doc_id]
            log_info('User {} doc_id {} has {} revisions, expected revision: {}'.format(user_obj.name,
                                                                                        doc_id, rev, expected_revision))
            if rev != expected_revision:
                rev_errors.append(doc_id)
                log_error('User {} doc_id {} got revision {}, expected revision {}'.format(
                    user_obj.name,
                    doc_id,
                    rev,
                    expected_revision)
                )

    assert len(rev_errors) == 0

    # Verify each User created docs are part of changes feed
    output = in_parallel(user_objects, 'check_doc_ids_in_changes_feed')
    assert True in output.values()

    # Take "db" offline
    status = admin.take_db_offline(db="db")
    assert status == 200

    sg_restart_config = sync_gateway_config_path_for_mode("bucket_online_offline/db_online_offline_access_restricted", test_mode)
    restart_status = cluster.sync_gateways[0].restart(sg_restart_config)
    assert restart_status == 0

    time.sleep(10)

    num_changes = admin.db_resync(db="db")
    log_info("expecting num_changes {} == num_docs {} * num_users {}".format(num_changes, num_docs, num_users))
    assert num_changes['payload']['changes'] == num_docs * num_users

    status = admin.bring_db_online(db="db")
    assert status == 200

    time.sleep(5)
    global_cache = list()
    for user in user_objects:
        global_cache.append(user.cache)

    all_docs = {k: v for user_cache in global_cache for k, v in user_cache.items()}

    verify_changes(user_x, expected_num_docs=expected_docs, expected_num_revisions=num_revisions, expected_docs=all_docs)

    end = time.time()
    log_info("Test ended.")
    log_info("Main test duration: {}".format(end - init_completed))
    log_info("Test setup time: {}".format(init_completed - start))
    log_info("Total Time taken: {}s".format(end - start))
Example #9
def verify_docs_removed(users, expected_num_docs, expected_docs):

    # Verifies that the expected_docs have all been flagged with _removed = true
    # Also verifies no duplication of changes results and set equality of the expected doc_ids
    # and the ids returned from the _changes feed

    errors = {
        "unexpected_changes_length": 0,
        "invalid_expected_docs_length": 0,
        "duplicate_expected_ids": 0,
        "duplicate_changes_doc_ids": 0,
        "expected_doc_ids_differ_from_changes_doc_ids": 0,
        "doc_not_removed": 0,
        "invalid_rev_id": 0
    }

    if type(users) is list:
        user_list = users
    else:
        # Allow a single user to be passed
        user_list = list()
        user_list.append(users)

    if type(expected_docs) is not dict:
        raise Exception("Make sure 'expected_docs' is a dictionary")

    for user in user_list:

        # get changes feed
        changes = user.get_changes(include_docs=True)
        results = changes["results"]

        changes_results = list()
        for result in results:
            changes_result = dict()
            if not result["id"].startswith("_user"):
                changes_result["id"] = result["doc"]["_id"]
                changes_result["rev"] = result["doc"]["_rev"]
                changes_result["removed"] = result["doc"]["_removed"]
                changes_results.append(changes_result)

        # Check expected_num_docs matches number of changes results
        if expected_num_docs != len(changes_results):
            errors["unexpected_changes_length"] += 1

        # Check number of expected num docs matched number of expected doc ids
        if expected_num_docs != len(expected_docs):
            errors["invalid_expected_docs_length"] += 1

        # Get ids from expected docs
        expected_doc_ids = expected_docs.keys()

        # Assert there are no duplicates in expected doc ids
        if len(expected_doc_ids) != len(set(expected_doc_ids)):
            errors["duplicate_expected_ids"] += 1

        # Get ids from all changes results
        changes_doc_ids = [result["id"] for result in changes_results]

        # Assert there are no duplicates in changes doc ids
        if len(changes_doc_ids) != len(set(changes_doc_ids)):
            errors["duplicate_changes_doc_ids"] += 1

        # Assert the expected doc ids and changes doc ids are the same
        if set(expected_doc_ids) != set(changes_doc_ids):
            errors["expected_doc_ids_differ_from_changes_doc_ids"] += 1

        num_doc_removed = 0
        for result in changes_results:

            # Check removed is set to true
            if result["removed"] is not True:
                errors["doc_not_removed"] += 1
            elif result["removed"] is True:
                num_doc_removed += 1

            # Compare revision number for id
            if expected_docs[result["id"]] != result["rev"]:
                errors["invalid_rev_id"] += 1

            # TODO - maybe try to ping the doc endpoint and assert a 4XX response?

        log_info(" -> REMOVED |{0}| expected (num_docs: {1}) _changes (num_docs: {2}, num_removed: {3})".format(
            user.name,
            expected_num_docs,
            len(changes_doc_ids),
            num_doc_removed
        ))

        # Print any errors that may have occurred
        error_count = 0
        for key, val in errors.items():
            if val != 0:
                log_error("<!> VERIFY ERROR - name: {}: occurrences: {}".format(key, val))
                error_count += 1

        assert error_count == 0
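A minimal calling sketch for verify_docs_removed, assuming it is in scope; 'expected_docs' maps each doc id to its last known rev, and the doc ids, rev ids, and user object below are placeholders.

# Hypothetical inputs: the doc ids / rev ids are illustrative, and 'seth' stands in for
# a user object returned by something like admin.register_user(...).
expected_docs = {
    "doc_0": "2-abc123",   # doc id -> rev recorded when the doc was last written
    "doc_1": "2-def456",
}

verify_docs_removed(
    users=seth,                          # a single user or a list of users
    expected_num_docs=len(expected_docs),
    expected_docs=expected_docs,
)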
Example #10
def verify_changes(users, expected_num_docs, expected_num_revisions, expected_docs, ignore_rev_ids=False):

    # When users create or update a doc on sync_gateway, the response of the REST call
    # is stored in the users cache. 'expected_docs' is a scenario level dictionary created
    # from the combination of these user caches. This is used to create expected results
    # when comparing against the changes feed for each user.

    errors = {
        "unexpected_changes_length": 0,
        "invalid_expected_docs_length": 0,
        "duplicate_expected_ids": 0,
        "duplicate_changes_doc_ids": 0,
        "expected_doc_ids_differ_from_changes_doc_ids": 0,
        "invalid_rev_id": 0,
        "unexpected_rev_id_prefix": 0,
        "unexpected_num_updates": 0
    }

    if type(users) is list:
        user_list = users
    else:
        # Allow a single user to be passed
        user_list = list()
        user_list.append(users)

    if type(expected_docs) is not dict:
        log_error("expected_docs is not a dictionary")
        raise Exception("Make sure 'expected_docs' is a dictionary")

    for user in user_list:

        changes = user.get_changes(include_docs=True)
        results = changes["results"]

        changes_results = list()
        for result in results:
            changes_result = dict()
            if not result["id"].startswith("_user"):
                changes_result["id"] = result["doc"]["_id"]
                changes_result["rev"] = result["doc"]["_rev"]
                changes_result["updates"] = result["doc"]["updates"]
                changes_results.append(changes_result)

        # Check expected_num_docs matches number of changes results
        if expected_num_docs != len(changes_results):
            log_error("{0} -> {1} expected_num_docs != {2} len(changes_results)".format(user.name, expected_num_docs, len(changes_results)))
            errors["unexpected_changes_length"] += 1

        # Check number of expected num docs matched number of expected doc ids
        if expected_num_docs != len(expected_docs):
            log_error("{0} -> {1} expected_num_docs != {2} len(expected_docs)".format(user.name, expected_num_docs, len(expected_docs)))
            errors["invalid_expected_docs_length"] += 1

        # Get ids from expected docs
        expected_doc_ids = expected_docs.keys()

        # Assert there are no duplicates in expected doc ids
        if len(expected_doc_ids) != len(set(expected_doc_ids)):
            log_error("{0} -> Duplicates found in expected_doc_ids".format(user.name))
            errors["duplicate_expected_ids"] += 1

        # Get ids from all changes results
        changes_doc_ids = [result["id"] for result in changes_results]

        # Assert there are no duplicates in changes doc ids
        if len(changes_doc_ids) != len(set(changes_doc_ids)):
            log_error("{0} -> Duplicates found in changes doc ids".format(user.name))
            errors["duplicate_changes_doc_ids"] += 1

        # Assert the expected doc ids and changes doc ids are the same
        if set(expected_doc_ids) != set(changes_doc_ids):
            log_error("{0} -> changes feed doc ids differ from expected doc ids".format(user.name))
            different_docs = set(expected_doc_ids) - set(changes_doc_ids)
            log_error("{0} -> Set difference {1}".format(user.name, different_docs))
            errors["expected_doc_ids_differ_from_changes_doc_ids"] += 1

        if ignore_rev_ids:
            log_warn("WARNING: Ignoring rev id verification!!")

        for result in changes_results:
            if not ignore_rev_ids:
                # Compare revision number for id
                if expected_docs[result["id"]] != result["rev"]:
                    errors["invalid_rev_id"] += 1

            # IMPORTANT - This assumes that no conflicts are created via new_edits in the doc PUT
            # Assert that the revision id prefix matches the number of expected revisions
            rev_id_prefix = result["rev"].split("-")[0]

            # rev-id prefix will be 1 when document is created
            # For any non-conflicting update, it will be incremented by one
            if expected_num_revisions != int(rev_id_prefix) - 1:
                log_error("{0} -> expected_num_revisions {1} does not match stored rev_id_prefix: {2}".format(user.name, expected_num_revisions, rev_id_prefix))
                errors["unexpected_rev_id_prefix"] += 1

            # Check number of expected updates matched the updates on the _changes doc
            if expected_num_revisions != result["updates"]:
                log_error("{0} -> expected_num_revisions {1} does not match number of updates {2}".format(user.name, expected_num_revisions, result["updates"]))
                errors["unexpected_num_updates"] += 1

        # Allow printing updates even if changes feed length is 0
        if len(changes_results) == 0:
            updates = 0
        else:
            updates = changes_results[0]["updates"]

        log_info(" -> |{0}| expected (num_docs: {1} num_revisions: {2}) _changes (num_docs: {3} updates: {4})".format(
            user.name,
            expected_num_docs,
            expected_num_revisions,
            len(changes_doc_ids),
            updates
        ))

        # Print any errors that may have occurred
        error_count = 0
        for key, val in errors.items():
            if val != 0:
                log_error("<!> VERIFY ERROR - name: {}: occurrences: {}".format(key, val))
                error_count += 1

        assert error_count == 0
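The rev-prefix check above assumes revision ids of the form '<generation>-<hash>', where a doc created once and then updated N times with no conflicts carries generation N + 1. A hedged sketch of the inputs verify_changes expects (the ids, revs, and user list are placeholders):

# Illustrative only: 10 non-conflicting updates after creation => rev ids like "11-<hash>".
expected_num_revisions = 10
expected_docs = {
    "User-0_doc_0": "11-f00dcafe",
    "User-0_doc_1": "11-deadbeef",
}

verify_changes(
    users=user_objects,                      # a list of users, or a single user
    expected_num_docs=len(expected_docs),
    expected_num_revisions=expected_num_revisions,
    expected_docs=expected_docs,
)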
Example #11
def write_config(config, pool_file, use_docker, sg_windows, sg_accel_windows):

    connection_string = ""
    if use_docker:
        connection_string = "ansible_connection=docker"

    ips, ip_to_node_type = get_hosts(pool_file)
    ip_to_node_type_len = len(ip_to_node_type)
    ip_to_node_type_defined = False

    resource_folder = os.path.dirname(pool_file)

    log_info("ips: {}".format(ips))

    if len(ips) < config.num_machines_required():
        log_warn(
            "WARNING: Skipping config {} since {} machines required, but only {} provided"
            .format(config.name, config.num_machines_required(), len(ips)))
        return

    if ip_to_node_type_len > 0:
        ip_to_node_type_defined = True

    # Check for number of IPs versus number of IPs in ip_to_node_type
    if ip_to_node_type and len(ip_to_node_type) != len(ips):
        raise Exception(
            "Number of IPs in resources/pool:ips and ip_to_node_type do not match. Exiting ..."
        )

    log_info("\nGenerating config: {}".format(config.name))

    ansible_cluster_conf_file = resource_folder + "/cluster_configs/{}".format(
        config.name)
    cluster_json_file = resource_folder + "/cluster_configs/{}.json".format(
        config.name)

    with open(ansible_cluster_conf_file, "w") as f:

        hosts = []
        couchbase_servers = []
        sync_gateways = []
        accels = []
        load_generators = []
        load_balancers = []

        f.write("[pool]\n")
        count = 1
        for ip in ips:
            f.write("ma{} ansible_host={} {}\n".format(count, ip,
                                                       connection_string))
            hosts.append({"name": "host{}".format(count), "ip": ip})
            count += 1

        f.write("\n")
        f.write("\n")

        # Write Servers
        cbs_ips_to_remove = []
        f.write("[couchbase_servers]\n")
        for i in range(config.num_cbs):
            # Check if the IP is present in the ip_to_node_type

            j = 0
            found = False
            while ip_to_node_type_defined and j < len(ips):
                if ips[j] not in ip_to_node_type:
                    raise Exception("{} not in ip_to_node_type".format(ips[j]))

                if ip_to_node_type[ips[j]] != "couchbase_servers" or ips[
                        j] in cbs_ips_to_remove:
                    # IP is not a cbs or if the cbs is already recorded
                    j += 1
                    continue
                else:
                    found = True
                    break

            # Check if the number of cbs in the ip_to_node_type match the config
            if ip_to_node_type_defined and not found:
                log_warn(
                    "WARNING: Skipping config {} since {} couchbase_servers required, but only {} provided"
                    .format(config.name, config.num_cbs,
                            len(cbs_ips_to_remove)))

                # Sometimes the config file is partially generated, correct sg but invalid cb etc.
                log_warn("WARNING: Removing the partially generated config {}".
                         format(config.name))
                os.unlink(f.name)

                return

            # j is the counter for ip_to_node_type which is invalid if not defined
            if ip_to_node_type_defined:
                ip = ips[j]
            else:
                ip = ips[i]

            f.write("cb{} ansible_host={} {}\n".format(i + 1, ip,
                                                       connection_string))
            couchbase_servers.append({"name": "cb{}".format(i + 1), "ip": ip})
            cbs_ips_to_remove.append(ip)

        for cbs_ip in cbs_ips_to_remove:
            ips.remove(cbs_ip)

        f.write("\n")

        # Write sync_gateways
        f.write("[sync_gateways]\n")
        sg_ips_to_remove = []
        for i in range(config.num_sgs):
            # Check if the IP is present in the ip_to_node_type
            j = 0
            found = False

            while ip_to_node_type_defined and j < len(ips):
                if ips[j] not in ip_to_node_type:
                    raise Exception("{} not in ip_to_node_type".format(ips[j]))

                if ip_to_node_type[ips[j]] != "sync_gateways" or ips[
                        j] in sg_ips_to_remove:
                    # IP is not a sg or if the sg is already recorded
                    j += 1
                    continue
                else:
                    found = True
                    break

            # Check if the number of sgs in the ip_to_node_type match the config
            if ip_to_node_type_defined and not found:
                log_warn(
                    "WARNING: Skipping config {} since {} sync_gateways required, but only {} provided"
                    .format(config.name, config.num_sgs,
                            len(sg_ips_to_remove)))

                # Sometimes the config file is partially generated, correct cbs but invalid sg etc.
                log_warn("WARNING: Removing the partially generated config {}".
                         format(config.name))
                os.unlink(f.name)

                return

            # j is the counter for ip_to_node_type which is invalid if not defined
            if ip_to_node_type_defined:
                ip = ips[j]
            else:
                ip = ips[i]

            f.write("sg{} ansible_host={} {}\n".format(i + 1, ip,
                                                       connection_string))
            sync_gateways.append({"name": "sg{}".format(i + 1), "ip": ip})
            sg_ips_to_remove.append(ip)

        for sg_ip in sg_ips_to_remove:
            print "REMOVING {} and {} from {}".format(sg_ip, sg_ips_to_remove,
                                                      ips)
            ips.remove(sg_ip)

        f.write("\n")

        # Write sg_accels
        ac_ips_to_remove = []
        f.write("[sg_accels]\n")
        for i in range(config.num_acs):
            # Check if the IP is present in the ip_to_node_type
            j = 0
            found = False

            while ip_to_node_type_defined and j < len(ips):
                if ips[j] not in ip_to_node_type:
                    raise Exception("{} not in ip_to_node_type".format(ips[j]))

                if ip_to_node_type[
                        ips[j]] != "sg_accels" or ips[j] in ac_ips_to_remove:
                    # IP is not a ac or if the ac is already recorded
                    j += 1
                    continue
                else:
                    found = True
                    break

            # Check if the number of acs in the ip_to_node_type match the config
            if ip_to_node_type_defined and not found:
                log_warn(
                    "WARNING: Skipping config {} since {} sg_accels required, but only {} provided"
                    .format(config.name, config.num_acs,
                            len(ac_ips_to_remove)))

                # Sometimes the config file is partially generated, correct cbs but invalid ac etc.
                log_warn("WARNING: Removing the partially generated config {}".
                         format(config.name))
                os.unlink(f.name)

                return

            # j is the counter for ip_to_node_type which is invalid if not defined
            if ip_to_node_type_defined:
                ip = ips[j]
            else:
                ip = ips[i]

            f.write("ac{} ansible_host={} {}\n".format(i + 1, ip,
                                                       connection_string))
            accels.append({"name": "ac{}".format(i + 1), "ip": ip})
            ac_ips_to_remove.append(ip)

        for ac_ip in ac_ips_to_remove:
            ips.remove(ac_ip)

        f.write("\n")

        # Write load generators
        lg_ips_to_remove = []
        f.write("[load_generators]\n")
        for i in range(config.num_lgs):
            # Check if the IP is present in the ip_to_node_type
            j = 0
            found = False
            while ip_to_node_type_defined and j < len(ips):
                if ips[j] not in ip_to_node_type:
                    raise Exception("{} not in ip_to_node_type".format(ips[j]))

                if ip_to_node_type[ips[j]] != "load_generators" or ips[
                        j] in lg_ips_to_remove:
                    # IP is not a lg or if the lg is already recorded
                    j += 1
                    continue
                else:
                    found = True
                    break

            # Check if the number of lgs in the ip_to_node_type match the config
            if ip_to_node_type_defined and not found:
                log_warn(
                    "WARNING: Skipping config {} since {} load_generators required, but only {} provided"
                    .format(config.name, config.num_lgs,
                            len(lg_ips_to_remove)))

                # Sometimes the config file is partially generated, correct cbs but invalid lg etc.
                log_warn("WARNING: Removing the partially generated config {}".
                         format(config.name))
                os.unlink(f.name)

                return

            # j is the counter for ip_to_node_type which is invalid if not defined
            if ip_to_node_type_defined:
                ip = ips[j]
            else:
                ip = ips[i]

            f.write("lg{} ansible_host={} {}\n".format(i + 1, ip,
                                                       connection_string))
            load_generators.append({"name": "lg{}".format(i + 1), "ip": ip})
            lg_ips_to_remove.append(ip)

        for lg_ip in lg_ips_to_remove:
            ips.remove(lg_ip)

        f.write("\n")

        # Write load balancers
        lb_ips_to_remove = []
        f.write("[load_balancers]\n")
        for i in range(config.num_lbs):
            # Check if the IP is present in the ip_to_node_type
            j = 0
            found = False
            while ip_to_node_type_defined and j < len(ips):
                if ips[j] not in ip_to_node_type:
                    raise Exception("{} not in ip_to_node_type".format(ips[j]))

                if ip_to_node_type[ips[j]] != "load_balancers" or ips[
                        j] in lb_ips_to_remove:
                    # IP is not a lb or if the lb is already recorded
                    j += 1
                    continue
                else:
                    found = True
                    break

            # Check if the number of lbs in the ip_to_node_type match the config
            if ip_to_node_type_defined and not found:
                log_warn(
                    "WARNING: Skipping config {} since {} load_balancers required, but only {} provided"
                    .format(config.name, config.num_lbs,
                            len(lb_ips_to_remove)))

                # Sometimes the config file is partially generated, correct cbs but invalid lb etc.
                log_warn("WARNING: Removing the partially generated config {}".
                         format(config.name))
                os.unlink(f.name)

                return

            # j is the counter for ip_to_node_type which is invalid if not defined
            if ip_to_node_type_defined:
                ip = ips[j]
            else:
                ip = ips[i]

            f.write("lb{} ansible_host={} {}\n".format(i + 1, ip,
                                                       connection_string))
            load_balancers.append({"name": "lb{}".format(i + 1), "ip": ip})
            lb_ips_to_remove.append(ip)

        for lb_ip in lb_ips_to_remove:
            ips.remove(lb_ip)

        f.write("\n")

        # Get local address to run webhook server on
        # TODO: make the webhook receiver its own endpoint, or come up with a better design.
        try:
            f.write("[webhook_ip]\n")
            # HACK: http://stackoverflow.com/questions/166506/finding-local-ip-addresses-using-pythons-stdlib
            # Connect to Google's public DNS server and get the socketname tuple (<local_ip_address>, <port>)
            # The 'local_ip_address' is the ip of the machine on the LAN. This will be used to run mock server
            # for the web hook tests. It will be exposed on the LAN so that other machines on the LAN can connect to it
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            s.connect(("8.8.8.8", 80))
            local_ip = s.getsockname()[0]
            s.close()

            log_info("webhook ip: {}".format(local_ip))
            f.write("tf1 ansible_host={} {}".format(local_ip,
                                                    connection_string))
        except Exception as e:
            log_error(
                "Failed to find local_ip, webhook tests will fail.  Error: {}".
                format(e))

        f.write("\n\n[environment]\n")
        f.write("cbs_ssl_enabled=False\n")
        f.write("xattrs_enabled=False\n")
        f.write("sg_lb_enabled=False\n")

        if sg_windows:
            f.write("\n\n[sync_gateways:vars]\n")
            f.write("ansible_user=FakeUser\n")
            f.write("ansible_password=FakePassword\n")
            f.write("ansible_port=5986\n")
            f.write("ansible_connection=winrm\n")
            f.write("ansible_winrm_server_cert_validation=ignore\n")

        if sg_accel_windows:
            f.write("\n\n[sg_accels:vars]\n")
            f.write("ansible_user=FakeUser\n")
            f.write("ansible_password=FakePassword\n")
            f.write("ansible_port=5986\n")
            f.write("ansible_connection=winrm\n")
            f.write("ansible_winrm_server_cert_validation=ignore\n")

        log_info("Generating {}.json".format(config.name))

        # Write json file consumable by testkit.cluster class
        cluster_dict = {
            "hosts": hosts,
            "couchbase_servers": couchbase_servers,
            "sync_gateways": sync_gateways,
            "sg_accels": accels,
            "load_generators": load_generators,
            "load_balancers": load_balancers,
            "environment": {
                "cbs_ssl_enabled": False,
                "xattrs_enabled": False,
                "sg_lb_enabled": False
            }
        }

        with open(cluster_json_file, "w") as f_json:
            f_json.write(json.dumps(cluster_dict, indent=4))
def test_mulitple_users_mulitiple_channels_mulitple_revisions(
        params_from_base_test_setup, sg_conf_name, num_users, num_channels,
        num_docs, num_revisions):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'mulitple_users_mulitiple_channels_mulitple_revisions'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_users: {}".format(num_users))
    log_info("num_channels: {}".format(num_channels))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    start = time.time()

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    init_completed = time.time()
    log_info("Initialization completed. Time taken:{}s".format(init_completed -
                                                               start))

    channels = ["channel-" + str(i) for i in range(num_channels)]
    password = "******"

    sgs = cluster.sync_gateways

    admin = Admin(sgs[0])

    # Register User
    log_info("Register User")
    user_objects = admin.register_bulk_users(target=sgs[0],
                                             db="db",
                                             name_prefix="User",
                                             number=num_users,
                                             password=password,
                                             channels=channels)

    # Add docs
    log_info("Add docs")
    in_parallel(user_objects, 'add_docs', num_docs)

    # Update docs
    log_info("Update docs")
    in_parallel(user_objects, 'update_docs', num_revisions)

    # Add a sleep to let Sync Gateway catch up.
    # Without it, this test fails in Channel-Cache mode because the changes feed
    # does not return the expected num_revisions for the docs.
    # The test passes in Distributed-Index mode.
    time.sleep(10)

    # Get changes for all users
    in_parallel(user_objects, 'get_changes')

    # Every user should have the same number of docs
    # total/expected docs = num_users * num_docs
    received_docs = in_parallel(user_objects, 'get_num_docs')

    expected_docs = num_users * num_docs
    for user_obj, docs in received_docs.items():
        log_info('User {} got {} docs, expected docs: {}'.format(
            user_obj.name, docs, expected_docs))
        assert docs == expected_docs

    # Verify that
    # user created doc-ids exist in docs received in changes feed
    # expected revision is equal to received revision
    expected_revision = str(num_revisions + 1)
    docs_rev_dict = in_parallel(user_objects, 'get_num_revisions')
    rev_errors = []
    for user_obj, docs_revision_dict in docs_rev_dict.items():
        for doc_id in docs_revision_dict.keys():
            rev = docs_revision_dict[doc_id]
            log_info(
                'User {} doc_id {} has {} revisions, expected revision: {}'.
                format(user_obj.name, doc_id, rev, expected_revision))
            if rev != expected_revision:
                rev_errors.append(doc_id)
                log_error(
                    'User {} doc_id {} got revision {}, expected revision {}'.
                    format(user_obj.name, doc_id, rev, expected_revision))

    assert len(rev_errors) == 0

    # Verify each User created docs are part of changes feed
    output = in_parallel(user_objects, 'check_doc_ids_in_changes_feed')
    assert True in output.values()

    end = time.time()
    log_info("Test ended.")
    log_info("Main test duration: {}".format(end - init_completed))
    log_info("Test setup time: {}".format(init_completed - start))
    log_info("Total Time taken: {}s".format(end - start))
def test_bucket_online_offline_resync_with_offline(params_from_base_test_setup,
                                                   sg_conf_name, num_users,
                                                   num_docs, num_revisions):
    start = time.time()

    cluster_conf = params_from_base_test_setup["cluster_config"]
    test_mode = params_from_base_test_setup["mode"]

    if test_mode == "di":
        pytest.skip("Unsupported feature in distributed index")

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, test_mode)

    log_info("Running 'test_bucket_online_offline_resync_with_online'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using num_users: {}".format(num_users))
    log_info("Using num_docs: {}".format(num_docs))
    log_info("Using num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_conf)

    init_completed = time.time()
    log_info("Initialization completed. Time taken:{}s".format(init_completed -
                                                               start))

    num_channels = 1
    channels = ["channel-" + str(i) for i in range(num_channels)]
    password = "******"

    sgs = cluster.sync_gateways

    admin = Admin(sgs[0])

    # Register User
    log_info("Register User")
    user_objects = admin.register_bulk_users(target=sgs[0],
                                             db="db",
                                             name_prefix="User",
                                             number=num_users,
                                             password=password,
                                             channels=channels)
    user_x = admin.register_user(target=sgs[0],
                                 db="db",
                                 name="User-X",
                                 password="******",
                                 channels=["channel_x"])

    # Add docs
    log_info("Add docs")
    in_parallel(user_objects, 'add_docs', num_docs)

    # Update docs
    log_info("Update docs")
    in_parallel(user_objects, 'update_docs', num_revisions)

    time.sleep(10)

    # Get changes for all users
    in_parallel(user_objects, 'get_changes')

    # Every user should have the same number of docs
    # total/expected docs = num_users * num_docs
    received_docs = in_parallel(user_objects, 'get_num_docs')

    expected_docs = num_users * num_docs
    for user_obj, docs in received_docs.items():
        log_info('User {} got {} docs, expected docs: {}'.format(
            user_obj.name, docs, expected_docs))
        assert docs == expected_docs

    # Verify that
    # user created doc-ids exist in docs received in changes feed
    # expected revision is equal to received revision
    expected_revision = str(num_revisions + 1)
    docs_rev_dict = in_parallel(user_objects, 'get_num_revisions')
    rev_errors = []
    for user_obj, docs_revision_dict in docs_rev_dict.items():
        for doc_id in docs_revision_dict.keys():
            rev = docs_revision_dict[doc_id]
            log_info(
                'User {} doc_id {} has {} revisions, expected revision: {}'.
                format(user_obj.name, doc_id, rev, expected_revision))
            if rev != expected_revision:
                rev_errors.append(doc_id)
                log_error(
                    'User {} doc_id {} got revision {}, expected revision {}'.
                    format(user_obj.name, doc_id, rev, expected_revision))

    assert len(rev_errors) == 0

    # Verify each User created docs are part of changes feed
    output = in_parallel(user_objects, 'check_doc_ids_in_changes_feed')
    assert True in output.values()

    # Take "db" offline
    sg_client = MobileRestClient()
    status = sg_client.take_db_offline(cluster_conf=cluster_conf, db="db")
    assert status == 0

    sg_restart_config = sync_gateway_config_path_for_mode(
        "bucket_online_offline/db_online_offline_access_restricted", test_mode)
    restart_status = cluster.sync_gateways[0].restart(sg_restart_config)
    assert restart_status == 0

    log_info("Sleeping....")
    time.sleep(10)
    pool = ThreadPool(processes=1)

    log_info("Restarted SG....")
    time.sleep(5)

    db_info = admin.get_db_info("db")
    log_info("Status of db = {}".format(db_info["state"]))
    assert db_info["state"] == "Offline"

    try:
        async_resync_result = pool.apply_async(admin.db_resync, ("db", ))
        log_info("resync issued !!!!!!")
    except Exception as e:
        log_info("Catch resync exception: {}".format(e))

    time.sleep(1)
    resync_occurred = False

    for i in range(20):
        db_info = admin.get_db_info("db")
        log_info("Status of db = {}".format(db_info["state"]))
        if db_info["state"] == "Resyncing":
            resync_occurred = True
            log_info("Resync occurred")
            try:
                status = admin.get_db_info(db="db")
                log_info("Got db_info request status: {}".format(status))
            except HTTPError as e:
                log_info("status = {} exception = {}".format(
                    status, e.response.status_code))
                assert False
            else:
                log_info("Got 200 ok for supported operation")

        time.sleep(1)
        if resync_occurred:
            break

    time.sleep(10)

    status = sg_client.bring_db_online(cluster_conf=cluster_conf, db="db")

    log_info("online request issued !!!!! response status: {}".format(status))

    time.sleep(5)
    db_info = admin.get_db_info("db")
    log_info("Status of db = {}".format(db_info["state"]))
    assert db_info["state"] == "Online"

    resync_result = async_resync_result.get()
    log_info("resync_changes {}".format(resync_result))
    log_info("expecting num_changes  == num_docs {} * num_users {}".format(
        num_docs, num_users))
    assert resync_result['payload']['changes'] == num_docs * num_users
    assert resync_result['status_code'] == 200

    time.sleep(5)
    global_cache = list()
    for user in user_objects:
        global_cache.append(user.cache)

    all_docs = {
        k: v
        for user_cache in global_cache for k, v in user_cache.items()
    }

    verify_changes(user_x,
                   expected_num_docs=expected_docs,
                   expected_num_revisions=num_revisions,
                   expected_docs=all_docs)

    end = time.time()
    log_info("Test ended.")
    log_info("Main test duration: {}".format(end - init_completed))
    log_info("Test setup time: {}".format(init_completed - start))
    log_info("Total Time taken: {}s".format(end - start))
def test_bucket_online_offline_resync_sanity(params_from_base_test_setup,
                                             sg_conf_name, num_users, num_docs,
                                             num_revisions):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    test_mode = params_from_base_test_setup["mode"]

    if test_mode == "di":
        pytest.skip("Unsupported feature in distributed index")

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, test_mode)

    log_info("Running 'test_bucket_online_offline_resync_sanity'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using num_users: {}".format(num_users))
    log_info("Using num_docs: {}".format(num_docs))
    log_info("Using num_revisions: {}".format(num_revisions))

    start = time.time()

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_conf)

    init_completed = time.time()
    log_info("Initialization completed. Time taken:{}s".format(init_completed -
                                                               start))

    num_channels = 1
    channels = ["channel-" + str(i) for i in range(num_channels)]
    password = "******"

    sgs = cluster.sync_gateways

    admin = Admin(sgs[0])

    # Register User
    log_info("Register User")
    user_objects = admin.register_bulk_users(target=sgs[0],
                                             db="db",
                                             name_prefix="User",
                                             number=num_users,
                                             password=password,
                                             channels=channels)
    user_x = admin.register_user(target=sgs[0],
                                 db="db",
                                 name="User-X",
                                 password="******",
                                 channels=["channel_x"])

    # Add docs
    log_info("Add docs")
    in_parallel(user_objects, 'add_docs', num_docs)

    # Update docs
    log_info("Update docs")
    in_parallel(user_objects, 'update_docs', num_revisions)

    time.sleep(10)

    # Get changes for all users
    in_parallel(user_objects, 'get_changes')

    # Every user should have the same number of docs
    # total/expected docs = num_users * num_docs
    received_docs = in_parallel(user_objects, 'get_num_docs')

    expected_docs = num_users * num_docs
    for user_obj, docs in received_docs.items():
        log_info('User {} got {} docs, expected docs: {}'.format(
            user_obj.name, docs, expected_docs))
        assert docs == expected_docs

    # Verify that
    # user created doc-ids exist in docs received in changes feed
    # expected revision is equal to received revision
    expected_revision = str(num_revisions + 1)
    docs_rev_dict = in_parallel(user_objects, 'get_num_revisions')
    rev_errors = []
    for user_obj, docs_revision_dict in docs_rev_dict.items():
        for doc_id in docs_revision_dict.keys():
            rev = docs_revision_dict[doc_id]
            log_info(
                'User {} doc_id {} has {} revisions, expected revision: {}'.
                format(user_obj.name, doc_id, rev, expected_revision))
            if rev != expected_revision:
                rev_errors.append(doc_id)
                log_error(
                    'User {} doc_id {} got revision {}, expected revision {}'.
                    format(user_obj.name, doc_id, rev, expected_revision))

    assert len(rev_errors) == 0

    # Verify each User created docs are part of changes feed
    output = in_parallel(user_objects, 'check_doc_ids_in_changes_feed')
    assert True in output.values()

    # Take "db" offline
    sg_client = MobileRestClient()
    status = sg_client.take_db_offline(cluster_conf=cluster_conf, db="db")
    assert status == 0

    sg_restart_config = sync_gateway_config_path_for_mode(
        "bucket_online_offline/db_online_offline_access_restricted", test_mode)
    restart_status = cluster.sync_gateways[0].restart(sg_restart_config)
    assert restart_status == 0

    time.sleep(10)

    num_changes = admin.db_resync(db="db")
    log_info("expecting num_changes {} == num_docs {} * num_users {}".format(
        num_changes, num_docs, num_users))
    assert num_changes['payload']['changes'] == num_docs * num_users

    # Take "db" online
    status = sg_client.bring_db_online(cluster_conf=cluster_conf, db="db")
    assert status == 0

    time.sleep(5)
    global_cache = list()
    for user in user_objects:
        global_cache.append(user.cache)

    all_docs = {
        k: v
        for user_cache in global_cache for k, v in user_cache.items()
    }

    verify_changes(user_x,
                   expected_num_docs=expected_docs,
                   expected_num_revisions=num_revisions,
                   expected_docs=all_docs)

    end = time.time()
    log_info("Test ended.")
    log_info("Main test duration: {}".format(end - init_completed))
    log_info("Test setup time: {}".format(init_completed - start))
    log_info("Total Time taken: {}s".format(end - start))
def write_config(config, pool_file):
    ips = get_ips(pool_file)
    log_info("ips: {}".format(ips))

    if len(ips) < config.num_machines_required():
        log_warn("WARNING: Skipping config {} since {} machines required, but only {} provided".format(
            config.name,
            config.num_machines_required(),
            len(ips))
        )
        return

    log_info("\nGenerating config: {}".format(config.name))

    ansible_cluster_conf_file = "resources/cluster_configs/{}".format(config.name)
    cluster_json_file = "resources/cluster_configs/{}.json".format(config.name)

    with open(ansible_cluster_conf_file, "w") as f:

        hosts = []
        couchbase_servers = []
        sync_gateways = []
        accels = []
        load_generators = []
        load_balancers = []

        f.write("[pool]\n")
        count = 1
        for ip in ips:
            f.write("ma{} ansible_host={}\n".format(count, ip))
            hosts.append({
                "name": "host{}".format(count),
                "ip": ip
            })
            count += 1

        f.write("\n")
        f.write("\n")

        # Write Servers
        cbs_ips_to_remove = []
        f.write("[couchbase_servers]\n")
        for i in range(config.num_cbs):
            ip = ips[i]
            f.write("cb{} ansible_host={}\n".format(i + 1, ip))
            couchbase_servers.append({
                "name": "cb{}".format(i + 1),
                "ip": ip
            })
            cbs_ips_to_remove.append(ip)

        for cbs_ip in cbs_ips_to_remove:
            ips.remove(cbs_ip)

        f.write("\n")

        # Write sync_gateways
        f.write("[sync_gateways]\n")
        sg_ips_to_remove = []
        for i in range(config.num_sgs):
            ip = ips[i]
            f.write("sg{} ansible_host={}\n".format(i + 1, ip))
            sync_gateways.append({
                "name": "sg{}".format(i + 1),
                "ip": ip
            })
            sg_ips_to_remove.append(ip)

        for sg_ip in sg_ips_to_remove:
            ips.remove(sg_ip)

        f.write("\n")

        # Write sg_accels
        ac_ips_to_remove = []
        f.write("[sg_accels]\n")
        for i in range(config.num_acs):
            ip = ips[i]
            f.write("ac{} ansible_host={}\n".format(i + 1, ip))
            accels.append({
                "name": "ac{}".format(i + 1),
                "ip": ip
            })
            ac_ips_to_remove.append(ip)

        for ac_ip in ac_ips_to_remove:
            ips.remove(ac_ip)

        f.write("\n")

        # Write load generators
        lg_ips_to_remove = []
        f.write("[load_generators]\n")
        for i in range(config.num_lgs):
            ip = ips[i]
            f.write("lg{} ansible_host={}\n".format(i + 1, ip))
            load_generators.append({
                "name": "lg{}".format(i + 1),
                "ip": ip
            })
            lg_ips_to_remove.append(ip)

        for lg_ip in lg_ips_to_remove:
            ips.remove(lg_ip)

        f.write("\n")

        # Write load balancers
        lb_ips_to_remove = []
        f.write("[load_balancers]\n")
        for i in range(config.num_lbs):
            ip = ips[i]
            f.write("lb{} ansible_host={}\n".format(i + 1, ip))
            load_balancers.append({
                "name": "lb{}".format(i + 1),
                "ip": ip
            })
            lb_ips_to_remove.append(ip)

        for lb_ip in lb_ips_to_remove:
            ips.remove(lb_ip)

        f.write("\n")

        # Get local address to run webhook server on
        # TODO: make the webhook receiver its own endpoint, or come up with a better design.
        try:
            f.write("[webhook_ip]\n")
            # HACK: http://stackoverflow.com/questions/166506/finding-local-ip-addresses-using-pythons-stdlib
            # Connect to Google's public DNS server and get the socketname tuple (<local_ip_address>, <port>)
            # The 'local_ip_address' is the ip of the machine on the LAN. This will be used to run mock server
            # for the web hook tests. It will be exposed on the LAN so that other machines on the LAN can connect to it
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            s.connect(("8.8.8.8", 80))
            local_ip = s.getsockname()[0]
            s.close()

            log_info("webhook ip: {}".format(local_ip))
            f.write("tf1 ansible_host={}".format(local_ip))
        except Exception as e:
            log_error("Failed to find local_ip, webhook tests will fail.  Error: {}".format(e))

        log_info("Generating {}.json".format(config.name))

        # Write json file consumable by testkit.cluster class
        cluster_dict = {
            "hosts": hosts,
            "couchbase_servers": couchbase_servers,
            "sync_gateways": sync_gateways,
            "sg_accels": accels,
            "load_generators": load_generators,
            "load_balancers": load_balancers
        }

        with open(cluster_json_file, "w") as f_json:
            f_json.write(json.dumps(cluster_dict, indent=4))