Example #1
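# The snippets below are pytest tests from the couchbaselabs/mobile-testkit repo and are not
# self-contained. A minimal sketch of the imports they rely on (module paths are assumptions
# based on that repo's layout; adjust to your checkout):
import json
import re
import time

from requests.exceptions import HTTPError

from keywords.utils import log_info                      # assumed location of log_info
from keywords.MobileRestClient import MobileRestClient   # assumed location
from keywords.SyncGateway import SyncGateway             # assumed location
from keywords.constants import SYNC_GATEWAY_CONFIGS      # assumed location
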
def test_peer_2_peer_sanity_pull(setup_p2p_test, num_docs_per_db, seeded_db, attachments_generator):
    """
    1. Create ls_db1 database on LiteServ One
    2. Create ls_db2 database on LiteServ Two
    3. Create continuous pull replication LiteServ 1 ls_db1 <- LiteServ 2 ls_db2
    4. Add num_docs_per_db docs to LiteServ 2 ls_db2
    5. Verify all docs replicate to LiteServ 1 ls_db1
    6. Verify all docs show up in changes for LiteServ 1 ls_db1
    """

    ls_url_one = setup_p2p_test["ls_url_one"]
    ls_url_two = setup_p2p_test["ls_url_two"]

    log_info("ls_url_one: {}".format(ls_url_one))
    log_info("ls_url_two: {}".format(ls_url_two))

    attachments = False

    if attachments_generator:
        log_info("Running test_peer_2_peer_sanity_pull with attachment {}".format(attachments_generator))
        attachments = True

    client = MobileRestClient()

    log_info("Creating databases")
    ls_db1 = client.create_database(url=ls_url_one, name="ls_db1")
    ls_db2 = client.create_database(url=ls_url_two, name="ls_db2")

    if seeded_db:
        bulk_docs = create_docs("test_ls_db2_seed", num_docs_per_db)
        ls_db2_docs_seed = client.add_bulk_docs(url=ls_url_two, db=ls_db2, docs=bulk_docs)
        assert len(ls_db2_docs_seed) == num_docs_per_db

    # Setup continuous pull replication from LiteServ 2 ls_db2 to LiteServ 1 ls_db1
    pull_repl = client.start_replication(
        url=ls_url_one,
        continuous=True,
        from_url=ls_url_two, from_db=ls_db2,
        to_db=ls_db1
    )

    client.wait_for_replication_status_idle(url=ls_url_one, replication_id=pull_repl)

    ls_db2_docs = client.add_docs(url=ls_url_two, db=ls_db2, number=num_docs_per_db, id_prefix="test_ls_db2", attachments_generator=attachments_generator)
    assert len(ls_db2_docs) == num_docs_per_db

    client.verify_docs_present(url=ls_url_one, db=ls_db1, expected_docs=ls_db2_docs, attachments=attachments)
    client.verify_docs_in_changes(url=ls_url_one, db=ls_db1, expected_docs=ls_db2_docs)

    # Build the combined list without mutating ls_db2_docs in place (+= on the alias would)
    total_ls_db2_docs = list(ls_db2_docs)
    if seeded_db:
        total_ls_db2_docs = total_ls_db2_docs + ls_db2_docs_seed

    client.verify_docs_present(url=ls_url_one, db=ls_db1, expected_docs=total_ls_db2_docs, attachments=attachments)
    client.verify_docs_in_changes(url=ls_url_one, db=ls_db1, expected_docs=total_ls_db2_docs)
def test_auto_prune_with_pull(setup_client_syncgateway_test):
    """Sanity test for autopruning with replication

    1. Create a database on LiteServ (ls_db)
    2. Add doc to sync gateway
    3. Update doc 50 times on sync_gateway
    4. Set up pull replication from sync_gateway db to LiteServ db
    5. Verify number of revisions on client is default (20)
    """

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_url = setup_client_syncgateway_test["sg_url"]
    sg_admin_url = setup_client_syncgateway_test["sg_admin_url"]

    client = MobileRestClient()
    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config, url=sg_url, config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_auto_prune_listener_sanity' ...")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_admin_url: {}".format(sg_admin_url))

    num_docs = 1
    num_revs = 50

    sg_user_channels = ["NBC"]
    sg_db = "db"
    sg_user_name = "sg_user"

    client.create_user(url=sg_admin_url, db=sg_db, name=sg_user_name, password="******", channels=sg_user_channels)
    sg_session = client.create_session(url=sg_admin_url, db=sg_db, name=sg_user_name)

    ls_db = client.create_database(url=ls_url, name="ls_db")

    sg_db_docs = client.add_docs(
        url=sg_url, db=sg_db, number=num_docs, id_prefix=sg_db, channels=sg_user_channels, auth=sg_session
    )
    assert len(sg_db_docs) == num_docs

    sg_docs_update = client.update_docs(url=sg_url, db=sg_db, docs=sg_db_docs, number_updates=num_revs, auth=sg_session)

    # Start continuous replication ls_db <- sg_db
    repl_one = client.start_replication(url=ls_url, continuous=True, from_url=sg_admin_url, from_db=sg_db, to_db=ls_db)

    client.wait_for_replication_status_idle(url=ls_url, replication_id=repl_one)
    client.verify_docs_present(url=ls_url, db=ls_db, expected_docs=sg_docs_update)
    client.verify_revs_num_for_docs(url=ls_url, db=ls_db, docs=sg_docs_update, expected_revs_per_doc=20)
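
# A minimal sketch of the check verify_revs_num_for_docs is assumed to perform, hitting the
# listener's REST API directly (the 'requests' library and this helper are assumptions):
def _count_available_revs(ls_url, ls_db, doc_id):
    import requests
    resp = requests.get("{}/{}/{}".format(ls_url, ls_db, doc_id), params={"revs_info": "true"})
    resp.raise_for_status()
    # Auto-pruning keeps only the newest revisions; pruned ones show status "missing"
    return len([r for r in resp.json()["_revs_info"] if r["status"] == "available"])
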
def test_peer_2_peer_sanity_pull(setup_p2p_test):
    """
    1. Create ls_db1 database on LiteServ One
    2. Create ls_db2 database on LiteServ Two
    3. Create continuous pull replication LiteServ 1 ls_db1 <- LiteServ 2 ls_db2
    4. Add 5000 docs to LiteServ 2 ls_db2
    5. Verify all docs replicate to LiteServ 1 ls_db1
    6. Verify all docs show up in changes for LiteServ 1 ls_db1
    """

    ls_url_one = setup_p2p_test["ls_url_one"]
    ls_url_two = setup_p2p_test["ls_url_two"]

    num_docs_per_db = 5000

    log_info("ls_url_one: {}".format(ls_url_one))
    log_info("ls_url_two: {}".format(ls_url_two))

    client = MobileRestClient()

    log_info("Creating databases")
    ls_db1 = client.create_database(url=ls_url_one, name="ls_db1")
    ls_db2 = client.create_database(url=ls_url_two, name="ls_db2")

    # Setup continuous pull replication from LiteServ 2 ls_db2 to LiteServ 1 ls_db1

    pull_repl = client.start_replication(
        url=ls_url_one,
        continuous=True,
        from_url=ls_url_two, from_db=ls_db2,
        to_db=ls_db1
    )

    client.wait_for_replication_status_idle(url=ls_url_one, replication_id=pull_repl)

    ls_db2_docs = client.add_docs(url=ls_url_two, db=ls_db2, number=num_docs_per_db, id_prefix="test_ls_db2")
    assert len(ls_db2_docs) == num_docs_per_db

    client.verify_docs_present(url=ls_url_one, db=ls_db1, expected_docs=ls_db2_docs)
    client.verify_docs_in_changes(url=ls_url_one, db=ls_db1, expected_docs=ls_db2_docs)
def test_verify_open_revs_with_revs_limit_push_conflict(setup_client_syncgateway_test):
    """Test replication from multiple client dbs to one sync_gateway db

    https://github.com/couchbase/couchbase-lite-ios/issues/1277
    """

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_url = setup_client_syncgateway_test["sg_url"]
    sg_admin_url = setup_client_syncgateway_test["sg_admin_url"]

    num_docs = 100
    num_revs = 20

    sg_db = "db"
    sg_user_name = "sg_user"

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_url,
        config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_verify_open_revs_with_revs_limit_push_conflict'")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_admin_url: {}".format(sg_admin_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revs: {}".format(num_revs))

    client = MobileRestClient()

    # Test the endpoint; the listener does not support users but should return a default response
    client.get_session(url=ls_url)
    sg_user_channels = ["NBC"]
    client.create_user(url=sg_admin_url, db=sg_db, name=sg_user_name, password="******", channels=sg_user_channels)
    sg_session = client.create_session(url=sg_admin_url, db=sg_db, name=sg_user_name)

    ls_db = client.create_database(url=ls_url, name="ls_db")
    ls_db_docs = client.add_docs(url=ls_url, db=ls_db, number=num_docs, id_prefix="ls_db", channels=sg_user_channels)
    assert len(ls_db_docs) == num_docs

    # Start replication ls_db -> sg_db
    repl_one = client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_admin_url, to_db=sg_db
    )

    client.verify_docs_present(url=sg_admin_url, db=sg_db, expected_docs=ls_db_docs)

    client.update_docs(url=sg_url, db=sg_db, docs=ls_db_docs, number_updates=num_revs, auth=sg_session)
    sg_current_doc = client.get_doc(url=sg_url, db=sg_db, doc_id="ls_db_2", auth=sg_session)

    client.update_docs(url=ls_url, db=ls_db, docs=ls_db_docs, number_updates=num_revs)
    ls_current_doc = client.get_doc(url=ls_url, db=ls_db, doc_id="ls_db_2")

    client.wait_for_replication_status_idle(url=ls_url, replication_id=repl_one)

    client.verify_doc_rev_generation(url=ls_url, db=ls_db, doc_id=ls_current_doc["_id"], expected_generation=21)
    client.verify_doc_rev_generation(url=sg_url, db=sg_db, doc_id=sg_current_doc["_id"], expected_generation=21, auth=sg_session)

    expected_ls_revs = [ls_current_doc["_rev"]]
    client.verify_open_revs(url=ls_url, db=ls_db, doc_id=ls_current_doc["_id"], expected_open_revs=expected_ls_revs)

    expected_sg_revs = [ls_current_doc["_rev"], sg_current_doc["_rev"]]
    client.verify_open_revs(url=sg_admin_url, db=sg_db, doc_id=sg_current_doc["_id"], expected_open_revs=expected_sg_revs)
def test_client_to_sync_gateway_complex_replication_with_revs_limit(setup_client_syncgateway_test):
    """ Ported from sync_gateway tests repo
    1.  Clear server buckets
    2.  Restart liteserv with _session
    3.  Restart sync_gateway with that config
    4.  Create db on LiteServ
    5.  Add numDocs to LiteServ db
    6.  Setup push replication from LiteServ db to sync_gateway
    7.  Verify doc present on sync_gateway (number of docs)
    8.  Update sg docs numRevs * 4 = 480
    9.  Update docs on LiteServ db numRevs * 4 = 480
    10. Setup pull replication from sg -> liteserv db
    11. Verify all docs are replicated
    12. Compact LiteServ db (POST _compact)
    13. Verify number of revs in LiteServ db (?revs_info=true); check rev status == available, fail if revs available > revs limit
    14. Delete LiteServ db conflicts (?conflicts=true) DELETE _conflicts
    15. Create numDoc number of docs in LiteServ db
    16. Update LiteServ db docs numRevs * 5 (600)
    17. Verify LiteServ db revs is < 602
    18. Verify LiteServ db docs revs prefix (9 * numRevs + 3)
    19. Compact LiteServ db
    20. Verify number of revs <= 10
    21. Delete LiteServ docs
    22. Delete Server bucket
    23. Delete LiteServ db
    """

    ls_db_name = "ls_db"
    sg_db = "db"
    sg_user_name = "sg_user"
    num_docs = 10
    num_revs = 100

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_url = setup_client_syncgateway_test["sg_url"]
    sg_admin_url = setup_client_syncgateway_test["sg_admin_url"]

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_url,
        config="{}/walrus-revs-limit.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_client_to_sync_gateway_complex_replication_with_revs_limit'")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_admin_url: {}".format(sg_admin_url))
    log_info("sg_url: {}".format(sg_url))

    client = MobileRestClient()

    # Test the endpoint; the listener does not support users but should return a default response
    client.get_session(url=ls_url)

    sg_user_channels = ["NBC"]
    client.create_user(url=sg_admin_url, db=sg_db, name=sg_user_name, password="******", channels=sg_user_channels)
    sg_session = client.create_session(url=sg_admin_url, db=sg_db, name=sg_user_name)

    ls_db = client.create_database(url=ls_url, name=ls_db_name)
    ls_db_docs = client.add_docs(url=ls_url, db=ls_db, number=num_docs, id_prefix=ls_db, channels=sg_user_channels)
    assert len(ls_db_docs) == num_docs

    # Start replication ls_db -> sg_db
    repl_one = client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_admin_url, to_db=sg_db
    )

    client.verify_docs_present(url=sg_admin_url, db=sg_db, expected_docs=ls_db_docs)

    # A delay is added to the updates here due to couchbase/couchbase-lite-ios#1277.
    # If the revs depth is small and a doc is updated past the revs depth before a push replication,
    # the push replication will have no common ancestor with sync_gateway, causing conflicts to be created.
    # Adding a delay between updates avoids this. CBL Mac and CBL .NET can change the default client
    # revs depth, but that is not configurable on Android, so the delay keeps the replication behaving
    # as expected on all platforms.
    client.update_docs(url=sg_url, db=sg_db, docs=ls_db_docs, number_updates=num_revs, delay=0.1, auth=sg_session)
    client.update_docs(url=ls_url, db=ls_db, docs=ls_db_docs, number_updates=num_revs, delay=0.1)

    # Start replication ls_db <- sg_db
    repl_two = client.start_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_admin_url, from_db=sg_db,
        to_db=ls_db
    )

    client.wait_for_replication_status_idle(url=ls_url, replication_id=repl_one)
    client.wait_for_replication_status_idle(url=ls_url, replication_id=repl_two)

    client.compact_database(url=ls_url, db=ls_db)

    # LiteServ should only have 20 revisions due to the built-in client revs limit
    client.verify_revs_num_for_docs(url=ls_url, db=ls_db, docs=ls_db_docs, expected_revs_per_doc=20)

    # Sync Gateway should have 100 revisions due to the specified revs_limit in the sg config and possible conflict winners from the liteserv db
    client.verify_max_revs_num_for_docs(url=sg_url, db=sg_db, docs=ls_db_docs, expected_max_number_revs_per_doc=100, auth=sg_session)

    client.delete_conflicts(url=ls_url, db=ls_db, docs=ls_db_docs)
    expected_generation = num_revs + 1
    client.verify_docs_rev_generations(url=ls_url, db=ls_db, docs=ls_db_docs, expected_generation=expected_generation)
    client.verify_docs_rev_generations(url=sg_url, db=sg_db, docs=ls_db_docs, expected_generation=expected_generation, auth=sg_session)

    client.delete_docs(url=ls_url, db=ls_db, docs=ls_db_docs)
    client.verify_docs_deleted(url=ls_url, db=ls_db, docs=ls_db_docs)
    client.verify_docs_deleted(url=sg_admin_url, db=sg_db, docs=ls_db_docs)

    ls_db_docs = client.add_docs(url=ls_url, db=ls_db, number=num_docs, id_prefix=ls_db, channels=sg_user_channels)
    assert len(ls_db_docs) == num_docs

    expected_revs = num_revs + 20 + 2
    client.update_docs(url=ls_url, db=ls_db, docs=ls_db_docs, delay=0.1, number_updates=num_revs)

    client.verify_max_revs_num_for_docs(url=ls_url, db=ls_db, docs=ls_db_docs, expected_max_number_revs_per_doc=expected_revs)

    expected_generation = (num_revs * 2) + 3
    client.verify_docs_rev_generations(url=ls_url, db=ls_db, docs=ls_db_docs, expected_generation=expected_generation)

    client.compact_database(url=ls_url, db=ls_db)
    client.verify_revs_num_for_docs(url=ls_url, db=ls_db, docs=ls_db_docs, expected_revs_per_doc=20)

    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_admin_url, to_db=sg_db
    )

    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_admin_url, from_db=sg_db,
        to_db=ls_db
    )

    client.wait_for_no_replications(url=ls_url)

    client.delete_conflicts(url=ls_url, db=ls_db, docs=ls_db_docs)
    client.delete_conflicts(url=sg_url, db=sg_db, docs=ls_db_docs, auth=sg_session)
    client.delete_docs(url=ls_url, db=ls_db, docs=ls_db_docs)

    # Start push pull and verify that all docs are deleted
    # Start replication ls_db -> sg_db
    repl_one = client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_admin_url, to_db=sg_db
    )

    # Start replication ls_db <- sg_db
    repl_two = client.start_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_admin_url, from_db=sg_db,
        to_db=ls_db
    )

    client.verify_docs_deleted(url=ls_url, db=ls_db, docs=ls_db_docs)
    client.verify_docs_deleted(url=sg_admin_url, db=sg_db, docs=ls_db_docs)
def test_replication_with_session_cookie(setup_client_syncgateway_test):
    """Regression test for https://github.com/couchbase/couchbase-lite-android/issues/817
    1. SyncGateway config with guest disabled = true and one user added (e.g. user1 / 1234)
    2. Create a new session on SGW for the user1 by using POST /_session.
       Capture the SyncGatewaySession cookie from the set-cookie in the response header.
    3. Start continuous push and pull replicator on the LiteServ with SyncGatewaySession cookie.
       Make sure that both replicators start correctly
    4. Delete the session from SGW by sending DELETE /_sessions/ to SGW
    5. Cancel both push and pull replicator on the LiteServ
    6. Repeat step 1 and 2
    """

    ls_db = "ls_db"
    sg_db = "db"

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_url = setup_client_syncgateway_test["sg_url"]
    sg_admin_url = setup_client_syncgateway_test["sg_admin_url"]

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_url,
        config="{}/walrus-user.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_replication_with_session_cookie'")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_admin_url: {}".format(sg_admin_url))
    log_info("sg_url: {}".format(sg_url))

    client = MobileRestClient()
    client.create_database(url=ls_url, name=ls_db)

    # Get session header for user_1
    session_header = client.create_session_header(url=sg_url, db=sg_db, name="user_1", password="******")

    # Get session id from header
    session_parts = re.split("=|;", session_header)
    session_id = session_parts[1]
    log_info("{}: {}".format(session_parts[0], session_id))
    session = (session_parts[0], session_id)
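    # For reference, the set-cookie value looks roughly like
    # "SyncGatewaySession=<session-id>; Path=/db; Expires=...", so re.split("=|;", ...)
    # yields ["SyncGatewaySession", "<session-id>", ...] -> (cookie name, session id)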

    # Start authenticated push replication
    repl_one = client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_url,
        to_db=sg_db,
        to_auth=session_header
    )

    # Start authenticated pull replication
    repl_two = client.start_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_url,
        from_db=sg_db,
        from_auth=session_header,
        to_db=ls_db,
    )

    # Wait for the 2 replications to be 'Idle'; on .NET they may not be immediately available via _active_tasks
    client.wait_for_replication_status_idle(ls_url, repl_one)
    client.wait_for_replication_status_idle(ls_url, repl_two)

    replications = client.get_replications(ls_url)
    assert len(replications) == 2, "2 replications (push / pull) should be running"

    num_docs_pushed = 100

    # Sanity test docs
    ls_docs = client.add_docs(url=ls_url, db=ls_db, number=num_docs_pushed, id_prefix="ls_doc", channels=["ABC"])
    assert len(ls_docs) == num_docs_pushed

    sg_docs = client.add_docs(url=sg_url, db=sg_db, number=num_docs_pushed, id_prefix="sg_doc", auth=session, channels=["ABC"])
    assert len(sg_docs) == num_docs_pushed

    all_docs = client.merge(ls_docs, sg_docs)
    log_info(all_docs)

    client.verify_docs_present(url=sg_admin_url, db=sg_db, expected_docs=all_docs)
    client.verify_docs_present(url=ls_url, db=ls_db, expected_docs=all_docs)

    # GET from session endpoint /{db}/_session/{session-id}
    session = client.get_session(url=sg_admin_url, db=sg_db, session_id=session_id)
    assert len(session["userCtx"]["channels"]) == 2, "There should be only 2 channels for the user"
    assert "ABC" in session["userCtx"]["channels"], "The channel info should contain 'ABC'"
    assert session["userCtx"]["name"] == "user_1", "The user should have the name 'user_1'"
    assert len(session["authentication_handlers"]) == 2, "There should be 2 authentication_handlers"
    assert "default" in session["authentication_handlers"], "Did not find 'default' in authentication_headers"
    assert "cookie" in session["authentication_handlers"], "Did not find 'cookie' in authentication_headers"

    log_info("SESSIONs: {}".format(session))

    # Delete session via sg admin port and _user rest endpoint
    client.delete_session(url=sg_admin_url, db=sg_db, user_name="user_1", session_id=session_id)

    # Make sure session is deleted
    expected_error_code = None
    try:
        client.get_session(url=sg_admin_url, db=sg_db, session_id=session_id)
    except HTTPError as he:
        expected_error_code = he.response.status_code
        log_info(expected_error_code)

    assert expected_error_code == 404, "Expected 404 status, actual {}".format(expected_error_code)

    # Cancel the replications
    # Stop repl_one
    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_url,
        to_db=sg_db,
        to_auth=session_header
    )

    # Stop repl_two
    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_url,
        from_db=sg_db,
        from_auth=session_header,
        to_db=ls_db,
    )

    client.wait_for_no_replications(ls_url)
    replications = client.get_replications(ls_url)
    assert len(replications) == 0, "All replications should be stopped"

    # Create new session and new push / pull replications
    session_header = client.create_session_header(url=sg_url, db=sg_db, name="user_1", password="******")

    # Get session id from header
    session_parts = re.split("=|;", session_header)
    session_id = session_parts[1]
    log_info("{}: {}".format(session_parts[0], session_id))

    # Start authenticated push replication
    repl_one = client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_url,
        to_db=sg_db,
        to_auth=session_header
    )

    # Start authenticated pull replication
    repl_two = client.start_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_url,
        from_db=sg_db,
        from_auth=session_header,
        to_db=ls_db,
    )

    replications = client.get_replications(ls_url)
    assert len(replications) == 2, "2 replications (push / pull) should be running, found: {}".format(len(replications))

    session = client.get_session(url=sg_admin_url, db=sg_db, session_id=session_id)
    assert len(session["userCtx"]["channels"]) == 2, "There should be only 2 channels for the user"
    assert "ABC" in session["userCtx"]["channels"], "The channel info should contain 'ABC'"
    assert session["userCtx"]["name"] == "user_1", "The user should have the name 'user_1'"
    assert len(session["authentication_handlers"]) == 2, "There should be 2 authentication_handlers"
    assert "default" in session["authentication_handlers"], "Did not find 'default' in authentication_headers"
    assert "cookie" in session["authentication_handlers"], "Did not find 'cookie' in authentication_headers"

    log_info("SESSIONs: {}".format(session))

    # Delete session via sg admin port and db rest endpoint
    client.delete_session(url=sg_admin_url, db=sg_db, session_id=session_id)

    # Make sure session is deleted
    expected_error_code = None
    try:
        client.get_session(url=sg_admin_url, db=sg_db, session_id=session_id)
    except HTTPError as he:
        expected_error_code = he.response.status_code
        log_info(expected_error_code)

    assert expected_error_code == 404, "Expected 404 status, actual {}".format(expected_error_code)
def test_multiple_replications_created_with_unique_properties(setup_client_syncgateway_test):
    """Regression test for couchbase/couchbase-lite-java-core#1386
    1. Set up SGW with a remote database named 'db', for example
    2. Create a local database such as ls_db
    3. Send POST /_replicate with source = ls_db, target = http://localhost:4985/db, continuous = true
    4. Send POST /_replicate with source = ls_db, target = http://localhost:4985/db, continuous = true, doc_ids=["doc1", "doc2"]
    5. Send POST /_replicate with source = ls_db, target = http://localhost:4985/db, continuous = true, filter="filter1"
    6. Make sure that the session_ids from each POST /_replicate are different.
    7. Send GET /_active_tasks to make sure that there are 3 tasks created.
    8. Send 3 POST /_replicate with the same parameters as Steps 3-5, plus cancel=true, to stop those replicators.
    9. Repeat Steps 3-8 with the source and target swapped to test the pull replicator.
    """

    sg_db = "db"
    ls_db = "ls_db"

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_one_admin = setup_client_syncgateway_test["sg_admin_url"]
    sg_one_public = setup_client_syncgateway_test["sg_url"]

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_one_public,
        config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_multiple_replications_created_with_unique_properties'")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_one_admin: {}".format(sg_one_admin))
    log_info("sg_one_public: {}".format(sg_one_public))

    client = MobileRestClient()
    client.create_database(url=ls_url, name=ls_db)

    ########
    # PUSH #
    ########
    # Start 3 unique push replication requests
    repl_one = client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_one_admin,
        to_db=sg_db
    )
    client.wait_for_replication_status_idle(ls_url, repl_one)

    repl_two = client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_one_admin,
        to_db=sg_db,
        doc_ids=["doc_1", "doc_2"]
    )
    client.wait_for_replication_status_idle(ls_url, repl_two)

    # Create doc filter and add to the design doc
    filters = {
        "language": "javascript",
        "filters": {
            "sample_filter": "function(doc, req) { if (doc.type && doc.type === \"skip\") { return false; } return true; }"
        }
    }
    client.add_design_doc(url=ls_url, db=ls_db, name="by_type", doc=json.dumps(filters))
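    # Note: a replication filter is addressed as "<design-doc-name>/<filter-name>",
    # so repl_filter="by_type/sample_filter" below refers to the filter defined above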

    repl_three = client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_one_admin,
        to_db=sg_db,
        repl_filter="by_type/sample_filter"
    )
    client.wait_for_replication_status_idle(ls_url, repl_three)

    # Verify 3 replications are running
    replications = client.get_replications(ls_url)
    log_info(replications)
    assert len(replications) == 3, "Number of replications, Expected: {} Actual: {}".format(
        3,
        len(replications)
    )

    # Stop repl_one
    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_one_admin,
        to_db=sg_db
    )

    # Stop repl_two
    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_one_admin,
        to_db=sg_db,
        doc_ids=["doc_1", "doc_2"]
    )

    # Stop repl_three
    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_one_admin,
        to_db=sg_db,
        repl_filter="by_type/sample_filter"
    )

    # Verify no replications are running
    client.wait_for_no_replications(ls_url)
    replications = client.get_replications(ls_url)
    log_info(replications)
    assert len(replications) == 0, "Number of replications, Expected: {} Actual: {}".format(
        0,
        len(replications)
    )

    ########
    # PULL #
    ########
    # Start 2 unique pull replication requests
    repl_four = client.start_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_one_admin,
        from_db=sg_db,
        to_db=ls_db
    )
    client.wait_for_replication_status_idle(ls_url, repl_four)

    # Start filtered pull from sync gateway to LiteServ
    repl_five = client.start_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_one_admin,
        from_db=sg_db,
        to_db=ls_db,
        channels_filter=["ABC", "CBS"]
    )
    client.wait_for_replication_status_idle(ls_url, repl_five)

    # Verify 2 replications are running
    replications = client.get_replications(ls_url)
    log_info(replications)
    assert len(replications) == 2, "Number of replications, Expected: {} Actual: {}".format(
        2,
        len(replications)
    )

    # Stop repl_four
    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_one_admin,
        from_db=sg_db,
        to_db=ls_db
    )

    # Stop repl_five
    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_one_admin,
        from_db=sg_db,
        to_db=ls_db,
        channels_filter=["ABC", "CBS"]
    )

    # Verify no replications are running
    client.wait_for_no_replications(ls_url)
    replications = client.get_replications(ls_url)
    log_info(replications)
    assert len(replications) == 0, "Number of replications, Expected: {} Actual: {}".format(
        0,
        len(replications)
    )
def test_initial_pull_replication(setup_client_syncgateway_test, continuous):
    """
    1. Prepare sync-gateway to have 10000 documents.
    2. Create a single-shot / continuous pull replicator to pull the docs into a database.
    3. Verify if all of the docs get pulled.
    Referenced issue: couchbase/couchbase-lite-android#955.
    """

    sg_db = "db"
    ls_db = "ls_db"

    num_docs = 10000

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_one_admin = setup_client_syncgateway_test["sg_admin_url"]
    sg_one_public = setup_client_syncgateway_test["sg_url"]

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_one_public,
        config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_initial_pull_replication', continuous: {}".format(continuous))
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_one_admin: {}".format(sg_one_admin))
    log_info("sg_one_public: {}".format(sg_one_public))

    client = MobileRestClient()
    client.create_user(sg_one_admin, sg_db, "seth", password="******", channels=["ABC", "NBC"])
    session = client.create_session(sg_one_admin, sg_db, "seth")

    # Create 'num_docs' docs on sync_gateway
    docs = client.add_docs(
        url=sg_one_public,
        db=sg_db,
        number=num_docs,
        id_prefix="seeded_doc",
        generator="four_k",
        auth=session
    )
    assert len(docs) == num_docs

    client.create_database(url=ls_url, name=ls_db)

    # Start one-shot or continuous pull replication, depending on the 'continuous' fixture
    repl_id = client.start_replication(
        url=ls_url,
        continuous=continuous,
        from_url=sg_one_admin,
        from_db=sg_db,
        to_db=ls_db
    )

    start = time.time()

    if continuous:
        log_info("Waiting for replication status 'Idle' for: {}".format(repl_id))
        # Android will report IDLE status, and drop into the 'verify_docs_present' below
        # due to https://github.com/couchbase/couchbase-lite-java-core/issues/1409
        client.wait_for_replication_status_idle(ls_url, repl_id)
    else:
        log_info("Waiting for no replications: {}".format(repl_id))
        client.wait_for_no_replications(ls_url)

    # Verify docs replicated to client
    client.verify_docs_present(url=ls_url, db=ls_db, expected_docs=docs, timeout=240)

    all_docs_replicated_time = time.time() - start
    log_info("Replication took: {}s".format(all_docs_replicated_time))

    # Verify docs show up in client's changes feed
    client.verify_docs_in_changes(url=ls_url, db=ls_db, expected_docs=docs)

    replications = client.get_replications(url=ls_url)

    if continuous:
        assert len(replications) == 1, "There should only be one replication running"
        assert replications[0]["status"] == "Idle", "Replication Status should be 'Idle'"
        assert replications[0]["continuous"], "Running replication should be continuous"
        # Only .NET has an 'error' property
        if "error" in replications[0]:
            assert len(replications[0]["error"]) == 0
    else:
        assert len(replications) == 0, "No replications should be running"
def test_initial_push_replication(setup_client_syncgateway_test, continuous):
    """
    1. Prepare LiteServ to have 10000 documents.
    2. Create a single-shot / continuous push replicator to push the docs into a sync_gateway database.
    3. Verify if all of the docs get pushed.
    """

    sg_db = "db"
    ls_db = "ls_db"
    seth_channels = ["ABC", "NBC"]

    num_docs = 10000

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_one_admin = setup_client_syncgateway_test["sg_admin_url"]
    sg_one_public = setup_client_syncgateway_test["sg_url"]

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_one_public,
        config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_initial_push_replication', continuous: {}".format(continuous))
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_one_admin: {}".format(sg_one_admin))
    log_info("sg_one_public: {}".format(sg_one_public))

    client = MobileRestClient()
    client.create_user(sg_one_admin, sg_db, "seth", password="******", channels=seth_channels)
    session = client.create_session(sg_one_admin, sg_db, "seth")

    client.create_database(url=ls_url, name=ls_db)

    # Create 'num_docs' docs on LiteServ
    docs = client.add_docs(
        url=ls_url,
        db=ls_db,
        number=num_docs,
        id_prefix="seeded_doc",
        generator="four_k",
        channels=seth_channels
    )
    assert len(docs) == num_docs

    # Start push replication
    repl_id = client.start_replication(
        url=ls_url,
        continuous=continuous,
        from_db=ls_db,
        to_url=sg_one_admin,
        to_db=sg_db
    )

    if continuous:
        log_info("Waiting for replication status 'Idle' for: {}".format(repl_id))
        client.wait_for_replication_status_idle(ls_url, repl_id)
    else:
        log_info("Waiting for no replications: {}".format(repl_id))
        client.wait_for_no_replications(ls_url)

    # Verify docs replicated to sync_gateway
    client.verify_docs_present(url=sg_one_public, db=sg_db, expected_docs=docs, auth=session)

    # Verify docs show up in sync_gateway's changes feed
    client.verify_docs_in_changes(url=sg_one_public, db=sg_db, expected_docs=docs, auth=session)

    replications = client.get_replications(url=ls_url)

    if continuous:
        assert len(replications) == 1, "There should only be one replication running"
        assert replications[0]["status"] == "Idle", "Replication Status should be 'Idle'"
        assert replications[0]["continuous"], "Running replication should be continuous"
        # Only .NET has an 'error' property
        if "error" in replications[0]:
            assert len(replications[0]["error"]) == 0
    else:
        assert len(replications) == 0, "No replications should be running"
def test_peer_2_peer_sanity(setup_p2p_test):
    """
    1. Sanity P2P Scenario
    2. Launch LiteServ 1 and LiteServ 2
    3. Create a database on each LiteServ
    4. Start continuous push pull replication from each db to the other
    5. Add docs to each db
    6. Verify the docs show up at each db
    7. Verify the docs show up in the database's changes feed.
    """

    ls_url_one = setup_p2p_test["ls_url_one"]
    ls_url_two = setup_p2p_test["ls_url_two"]

    num_docs_per_db = 1000

    log_info("ls_url_one: {}".format(ls_url_one))
    log_info("ls_url_two: {}".format(ls_url_two))

    client = MobileRestClient()

    log_info("Creating databases")
    ls_db1 = client.create_database(url=ls_url_one, name="ls_db1")
    ls_db2 = client.create_database(url=ls_url_two, name="ls_db2")

    # Setup continuous push / pull replication from LiteServ 1 ls_db1 to LiteServ 2 ls_db2
    repl_one = client.start_replication(
        url=ls_url_one,
        continuous=True,
        from_db=ls_db1,
        to_url=ls_url_two, to_db=ls_db2
    )

    repl_two = client.start_replication(
        url=ls_url_one,
        continuous=True,
        from_url=ls_url_two, from_db=ls_db2,
        to_db=ls_db1
    )

    # Setup continuous push / pull replication from LiteServ 2 ls_db2 to LiteServ 1 ls_db1
    repl_three = client.start_replication(
        url=ls_url_two,
        continuous=True,
        from_db=ls_db2,
        to_url=ls_url_one, to_db=ls_db1
    )

    repl_four = client.start_replication(
        url=ls_url_two,
        continuous=True,
        from_url=ls_url_one, from_db=ls_db1,
        to_db=ls_db2
    )

    client.wait_for_replication_status_idle(url=ls_url_one, replication_id=repl_one)
    client.wait_for_replication_status_idle(url=ls_url_one, replication_id=repl_two)
    client.wait_for_replication_status_idle(url=ls_url_two, replication_id=repl_three)
    client.wait_for_replication_status_idle(url=ls_url_two, replication_id=repl_four)

    ls_url_one_replications = client.get_replications(ls_url_one)
    assert len(ls_url_one_replications) == 2

    ls_url_two_replications = client.get_replications(ls_url_two)
    assert len(ls_url_two_replications) == 2

    ls_db1_docs = client.add_docs(url=ls_url_one, db=ls_db1, number=num_docs_per_db, id_prefix="test_ls_db1")
    assert len(ls_db1_docs) == num_docs_per_db

    ls_db2_docs = client.add_docs(url=ls_url_two, db=ls_db2, number=num_docs_per_db, id_prefix="test_ls_db2")
    assert len(ls_db2_docs) == num_docs_per_db

    all_docs = client.merge(ls_db1_docs, ls_db2_docs)
    assert len(all_docs) == 2000

    client.verify_docs_present(url=ls_url_one, db=ls_db1, expected_docs=all_docs)
    client.verify_docs_present(url=ls_url_two, db=ls_db2, expected_docs=all_docs)

    client.verify_docs_in_changes(url=ls_url_one, db=ls_db1, expected_docs=all_docs)
    client.verify_docs_in_changes(url=ls_url_two, db=ls_db2, expected_docs=all_docs)
Example #12
def test_upgrade(params_from_base_test_setup):
    """
    @summary
        The initial versions of SG and CBS have already been provisioned at this point
        We have to upgrade them to the upgraded versions
    """
    cluster_config = params_from_base_test_setup['cluster_config']
    mode = params_from_base_test_setup['mode']
    xattrs_enabled = params_from_base_test_setup['xattrs_enabled']
    ls_url = params_from_base_test_setup["ls_url"]
    server_version = params_from_base_test_setup['server_version']
    sync_gateway_version = params_from_base_test_setup['sync_gateway_version']
    server_upgraded_version = params_from_base_test_setup[
        'server_upgraded_version']
    sync_gateway_upgraded_version = params_from_base_test_setup[
        'sync_gateway_upgraded_version']
    sg_url = params_from_base_test_setup['sg_url']
    sg_admin_url = params_from_base_test_setup['sg_admin_url']
    num_docs = int(params_from_base_test_setup['num_docs'])
    cbs_platform = params_from_base_test_setup['cbs_platform']
    cbs_toy_build = params_from_base_test_setup['cbs_toy_build']
    sg_conf = "{}/resources/sync_gateway_configs/sync_gateway_default_functional_tests_{}.json".format(
        os.getcwd(), mode)

    # Add data to liteserv
    client = MobileRestClient()
    log_info("ls_url: {}".format(ls_url))
    ls_db = client.create_database(ls_url, name="ls_db")

    # Create user and session on SG
    sg_user_channels = ["sg_user_channel"]
    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    client.create_user(url=sg_admin_url,
                       db=sg_db,
                       name=sg_user_name,
                       password=sg_user_password,
                       channels=sg_user_channels)
    sg_session = client.create_session(url=sg_admin_url,
                                       db=sg_db,
                                       name=sg_user_name,
                                       password=sg_user_password)

    log_info(
        "Starting continuous push pull replication from liteserv to sync gateway"
    )
    repl_one = client.start_replication(url=ls_url,
                                        continuous=True,
                                        from_db=ls_db,
                                        to_url=sg_url,
                                        to_db=sg_db,
                                        to_auth=sg_session)
    client.wait_for_replication_status_idle(ls_url, repl_one)

    log_info("Starting replication from sync gateway to liteserv")
    client.start_replication(url=ls_url,
                             continuous=True,
                             from_url=sg_url,
                             from_db=sg_db,
                             from_auth=sg_session,
                             to_db=ls_db)

    # Add docs to liteserv
    added_docs = add_docs_to_client_task(client=client,
                                         url=ls_url,
                                         db=ls_db,
                                         channels=sg_user_channels,
                                         num_docs=num_docs)
    log_info("Added {} docs".format(len(added_docs)))

    # start updating docs
    terminator_doc_id = 'terminator'
    with ProcessPoolExecutor() as up:
        # Start updates in background process
        updates_future = up.submit(update_docs, client, ls_url, ls_db,
                                   added_docs, sg_session, terminator_doc_id)
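        # update_docs is assumed to keep updating until it sees the terminator doc in the
        # changes feed; send_changes_termination_doc below writes that doc to stop it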

        # Supported upgrade process
        # 1. Upgrade SGs first (docmeta -> docmeta); CBS 5.0.0 does not support TAP.
        # 2. Upgrade the CBS cluster.
        # 3. Enable import/xattrs on SGs

        # Upgrade SG docmeta -> docmeta
        cluster_util = ClusterKeywords()
        topology = cluster_util.get_cluster_topology(cluster_config,
                                                     lb_enable=False)
        sync_gateways = topology["sync_gateways"]
        sg_accels = topology["sg_accels"]

        upgrade_sync_gateway(sync_gateways, sync_gateway_version,
                             sync_gateway_upgraded_version, sg_conf,
                             cluster_config)

        if mode == "di":
            upgrade_sg_accel(sg_accels, sync_gateway_version,
                             sync_gateway_upgraded_version, sg_conf,
                             cluster_config)

        # Upgrade CBS
        cluster = Cluster(config=cluster_config)
        if len(cluster.servers) < 2:
            raise Exception("Please provide at least 2 servers")

        server_urls = []
        for server in cluster.servers:
            server_urls.append(server.url)

        primary_server = cluster.servers[0]
        secondary_server = cluster.servers[1]
        servers = cluster.servers[1:]

        upgrade_server_cluster(servers,
                               primary_server,
                               secondary_server,
                               server_version,
                               server_upgraded_version,
                               server_urls,
                               cluster_config,
                               cbs_platform,
                               toy_build=cbs_toy_build)

        # Restart SGs after the server upgrade
        sg_obj = SyncGateway()
        for sg in sync_gateways:
            sg_ip = host_for_url(sg["admin"])
            log_info("Restarting sync gateway {}".format(sg_ip))
            sg_obj.restart_sync_gateways(cluster_config=cluster_config,
                                         url=sg_ip)
            time.sleep(5)

        if mode == "di":
            ac_obj = SyncGateway()
            for ac in sg_accels:
                ac_ip = host_for_url(ac)
                log_info("Restarting sg accel {}".format(ac_ip))
                ac_obj.restart_sync_gateways(cluster_config=cluster_config,
                                             url=ac_ip)
                time.sleep(5)

        if xattrs_enabled:
            # Enable xattrs on all SG/SGAccel nodes
            # cc - Start 1 SG with import enabled, all with XATTRs enabled
            # di - All SGs/SGAccels with xattrs enabled - this will also enable import on SGAccel
            #    - Do not enable import in SG.
            if mode == "cc":
                enable_import = True
            elif mode == "di":
                enable_import = False

            if mode == "di":
                ac_obj = SyncGateway()
                for ac in sg_accels:
                    ac_ip = host_for_url(ac)
                    ac_obj.enable_import_xattrs(cluster_config=cluster_config,
                                                sg_conf=sg_conf,
                                                url=ac_ip,
                                                enable_import=False)

            sg_obj = SyncGateway()
            for sg in sync_gateways:
                sg_ip = host_for_url(sg["admin"])
                sg_obj.enable_import_xattrs(cluster_config=cluster_config,
                                            sg_conf=sg_conf,
                                            url=sg_ip,
                                            enable_import=enable_import)
                enable_import = False
                # Check Import showing up on all nodes

        send_changes_termination_doc(auth=sg_session,
                                     terminator_doc_id=terminator_doc_id,
                                     terminator_channel=sg_user_channels,
                                     ls_url=ls_url,
                                     ls_db=ls_db)
        log_info("Waiting for doc updates to complete")
        updated_doc_revs = updates_future.result()

        log_info("Stopping replication from liteserv to sync gateway")
        # Stop repl_one
        client.stop_replication(url=ls_url,
                                continuous=True,
                                from_db=ls_db,
                                to_url=sg_url,
                                to_db=sg_db,
                                to_auth=sg_session)

        log_info("Stopping replication from sync gateway to liteserv")
        # Stop repl_two
        client.stop_replication(url=ls_url,
                                continuous=True,
                                from_url=sg_url,
                                from_db=sg_db,
                                from_auth=sg_session,
                                to_db=ls_db)
        # Gather the new revs for verification
        log_info("Gathering the updated revs for verification")
        doc_ids = []
        for i in range(len(added_docs)):
            doc_ids.append(added_docs[i]["id"])
            if added_docs[i]["id"] in updated_doc_revs:
                added_docs[i]["rev"] = updated_doc_revs[added_docs[i]["id"]]

        # Verify rev, doc body and revision history of all docs
        verify_sg_docs_revision_history(url=sg_admin_url,
                                        db=sg_db,
                                        added_docs=added_docs)

        if xattrs_enabled:
            # Verify through SDK that there is no _sync property in the doc body
            bucket_name = 'data-bucket'
            sdk_client = Bucket('couchbase://{}/{}'.format(
                primary_server.host, bucket_name),
                                password='******',
                                timeout=SDK_TIMEOUT)
            log_info("Fetching docs from SDK")
            docs_from_sdk = sdk_client.get_multi(doc_ids)
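            # get_multi returns a mapping of doc id -> result object; .value below is
            # assumed to be the parsed JSON doc body (couchbase Python SDK 2.x behavior)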

            log_info("Verifying that there is no _sync property in the docs")
            for i in docs_from_sdk:
                if "_sync" in docs_from_sdk[i].value:
                    raise Exception(
                        "_sync section found in docs after upgrade")
Example #13
def test_auto_prune_with_pull(setup_client_syncgateway_test):
    """Sanity test for autopruning with replication

    1. Create a database on LiteServ (ls_db)
    2. Add doc to sync gateway
    3. Update doc 50 times on sync_gateway
    4. Set up pull replication from sync_gateway db to LiteServ db
    5. Verify number of revisions on client is default (20)
    """

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    sg_mode = setup_client_syncgateway_test["sg_mode"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_url = setup_client_syncgateway_test["sg_url"]
    sg_admin_url = setup_client_syncgateway_test["sg_admin_url"]

    client = MobileRestClient()

    sg_config = sync_gateway_config_path_for_mode(
        "listener_tests/listener_tests", sg_mode)
    c = cluster.Cluster(config=cluster_config)
    c.reset(sg_config_path=sg_config)

    log_info("Running 'test_auto_prune_listener_sanity' ...")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_admin_url: {}".format(sg_admin_url))

    num_docs = 1
    num_revs = 50

    sg_user_channels = ["NBC"]
    sg_db = "db"
    sg_user_name = "sg_user"

    client.create_user(url=sg_admin_url,
                       db=sg_db,
                       name=sg_user_name,
                       password="******",
                       channels=sg_user_channels)
    sg_session = client.create_session(url=sg_admin_url,
                                       db=sg_db,
                                       name=sg_user_name)

    ls_db = client.create_database(url=ls_url, name="ls_db")

    sg_db_docs = client.add_docs(url=sg_url,
                                 db=sg_db,
                                 number=num_docs,
                                 id_prefix=sg_db,
                                 channels=sg_user_channels,
                                 auth=sg_session)
    assert len(sg_db_docs) == num_docs

    sg_docs_update = client.update_docs(url=sg_url,
                                        db=sg_db,
                                        docs=sg_db_docs,
                                        number_updates=num_revs,
                                        auth=sg_session)

    # Start continuous replication ls_db <- sg_db
    repl_one = client.start_replication(url=ls_url,
                                        continuous=True,
                                        from_url=sg_admin_url,
                                        from_db=sg_db,
                                        to_db=ls_db)

    client.wait_for_replication_status_idle(url=ls_url,
                                            replication_id=repl_one)
    client.verify_docs_present(url=ls_url,
                               db=ls_db,
                               expected_docs=sg_docs_update)
    client.verify_revs_num_for_docs(url=ls_url,
                                    db=ls_db,
                                    docs=sg_docs_update,
                                    expected_revs_per_doc=20)