# NOTE(review): "expirary" looks like a typo for "expiry" — renaming could break
# CI test selection by name, so the name is left as-is.
def test_rolling_ttl_remove_expirary(params_from_base_test_setup, sg_conf_name):
    """
    Verify that removing the expiry on the last update keeps a doc alive.

    1. PUT /db/doc1 via SG with property "_exp":3
    2. Once per second for 10 seconds, update /db/doc1 with a new revision (also with "_exp":3)
    3. Update /db/doc1 with a revision with no expiry
    4. Get /db/doc1.  Assert response is 200
    """
    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)

    # Reset Sync Gateway / server state so expiry timing starts clean
    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]

    log_info("Running 'test_rolling_ttl_remove_expirary'")
    log_info("cbs_url: {}".format(cbs_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC", "ABC"]

    client = MobileRestClient()

    client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)

    # exp_3 gets a rolling 3s TTL; exp_10 is a 10s control doc
    doc_exp_3_body = document.create_doc(doc_id="exp_3", expiry=3, channels=sg_user_channels)
    doc_exp_10_body = document.create_doc(doc_id="exp_10", expiry=10, channels=sg_user_channels)

    doc_exp_3 = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_3_body, auth=sg_user_session)
    doc_exp_10 = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_10_body, auth=sg_user_session)

    # Roll the 3s expiry forward once per second for 10 updates, then drop it
    client.update_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3["id"], number_updates=10, expiry=3, delay=1, auth=sg_user_session)
    client.update_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3["id"], number_updates=1, auth=sg_user_session)

    # If expiry was not removed in the last update, this would expire doc_exp_3
    time.sleep(5)

    # doc_exp_3 should still be around due to removal of expiry
    doc_exp_3 = client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3["id"], auth=sg_user_session)
    assert doc_exp_3["_id"] == "exp_3"

    # doc_exp_10 should be expired due to the updates (10s) + sleep (5s)
    with pytest.raises(HTTPError) as he:
        client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_10["id"], auth=sg_user_session)
    assert he.value[0].startswith("404 Client Error: Not Found for url:")
def test_rolling_ttl_expires(params_from_base_test_setup, sg_conf_name):
    """
    Verify that a doc whose updates all carry "_exp":3 does expire once
    updates stop (the TTL rolls forward but is never removed).

    1. PUT /db/doc1 via SG with property "_exp":3
    2. Update /db/doc1 10 times with a new revision (also with "_exp":3)
    3. Wait 5 seconds
    4. Get /db/doc1.  Assert response is 404 (the doc has expired)
    """
    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)

    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]

    log_info("Running 'test_rolling_ttl_expires'")
    log_info("cbs_url: {}".format(cbs_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC", "ABC"]

    client = MobileRestClient()

    client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)

    # exp_3 keeps its 3s TTL on every update; exp_10 is a 10s control doc
    doc_exp_3_body = document.create_doc(doc_id="exp_3", expiry=3, channels=sg_user_channels)
    doc_exp_10_body = document.create_doc(doc_id="exp_10", expiry=10, channels=sg_user_channels)

    doc_exp_3 = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_3_body, auth=sg_user_session)
    doc_exp_10 = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_10_body, auth=sg_user_session)

    # Every update re-applies the 3s expiry (no delay between updates here)
    client.update_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3["id"], number_updates=10, expiry=3, auth=sg_user_session)

    # Sleep should allow doc_exp_3 to expire, but still be in the window to get doc_exp_10
    time.sleep(5)

    # doc_exp_3 should be expired
    with pytest.raises(HTTPError) as he:
        client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3["id"], auth=sg_user_session)
    assert he.value[0].startswith("404 Client Error: Not Found for url:")

    # doc_exp_10 should be available still
    doc_exp_10_result = client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_10["id"], auth=sg_user_session)
    assert doc_exp_10_result["_id"] == "exp_10"
# NOTE(review): a second `def test_removing_expiry` appears later in this file and
# shadows this definition at import time, so pytest will only collect the later
# copy — confirm which version is intended and delete the other.
def test_removing_expiry(params_from_base_test_setup, sg_conf_name):
    """
    Verify that updating a doc with revisions that carry no expiry value
    removes the original expiry, so the doc does not get purged.

    1. PUT /db/doc1 via SG with property "_exp":3
    2. Update /db/doc1 with a new revision with no expiry value
    3. After 10 updates, update /db/doc1 with a revision with no expiry
    """
    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)

    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]

    log_info("Running 'test_removing_expiry'")
    log_info("cbs_url: {}".format(cbs_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC", "ABC"]

    client = MobileRestClient()

    client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)

    doc_exp_3_body = document.create_doc(doc_id="exp_3", expiry=3, channels=sg_user_channels)
    doc_exp_10_body = document.create_doc(doc_id="exp_10", expiry=10, channels=sg_user_channels)

    doc_exp_3 = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_3_body, auth=sg_user_session)
    doc_exp_10 = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_10_body, auth=sg_user_session)

    # Updates carry no expiry kwarg, which should strip the original TTL
    doc_exp_3_updated = client.update_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3["id"], number_updates=10, auth=sg_user_session)

    # Sleep should allow an expiry to happen on doc_exp_3 if it had not been removed.
    # Expected behavior is that the doc_exp_3 will still be around due to the removal of the expiry
    time.sleep(5)

    # doc_exp_3 should no longer have an expiry and should not raise an exception
    doc_exp_3_updated_result = client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3_updated["id"], auth=sg_user_session)
    assert doc_exp_3_updated_result["_id"] == "exp_3"

    # doc_exp_10 should be available still and should not raise an exception
    doc_exp_10_result = client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_10["id"], auth=sg_user_session)
    assert doc_exp_10_result["_id"] == "exp_10"
# NOTE(review): a second `def test_non_winning_revisions` appears later in this
# file and shadows this definition at import time, so pytest will only collect
# the later copy — confirm which version is intended and delete the other.
def test_non_winning_revisions(params_from_base_test_setup, sg_conf_name):
    """
    Add non-winning revisions to the revision tree and ensure
    that the changes feed returns the correct revisions

    Steps:
    - Create a doc
    - Add 5 revs
    - changes, assert rev starts with "6-" from 0, store "last_seq_1"
    - Create a conflict off first revision ("2-foo") (POST docs, new_edits == false)
    - changes, assert rev starts with "6-" from "last_seq_1", store "last_seq_2"
    - changes, assert rev starts with "6-" from 0
    - Add a "3-foo" rev with rev "2-foo" as parent
    - changes, assert rev starts with "6-" from "last_seq_2", store "last_seq_3"
    - changes, assert rev starts with "6-" from 0
    - add tombstone rev as child of "6-" i.e. issue delete on ("6-")
    - changes, assert rev starts with "3-foo" from "last_seq_3"
    - changes, assert rev starts with "3-foo" from 0
    """
    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    c = cluster.Cluster(cluster_config)
    c.reset(sg_conf)

    client = MobileRestClient()

    seth_user_info = userinfo.UserInfo(
        name="seth",
        password="******",
        channels=["NATGEO"],
        roles=[]
    )

    seth_auth = client.create_user(
        url=sg_admin_url,
        db=sg_db,
        name=seth_user_info.name,
        password=seth_user_info.password,
        channels=seth_user_info.channels
    )

    test_doc_body = document.create_doc(doc_id="test_doc", channels=seth_user_info.channels)
    rev_gen_1_doc = client.add_doc(url=sg_url, db=sg_db, doc=test_doc_body, auth=seth_auth)

    # 5 updates on top of rev 1 produce a generation-6 winning revision
    rev_gen_6_doc = client.update_doc(url=sg_url, db=sg_db, doc_id=rev_gen_1_doc["id"], number_updates=5, auth=seth_auth)
    assert rev_gen_6_doc["rev"].startswith("6-")

    # Get changes until rev generation 6 document shows up
    start = time.time()
    last_seq = 0
    while True:
        if time.time() - start > keywords.constants.CLIENT_REQUEST_TIMEOUT:
            raise keywords.exceptions.TimeoutError("Wait for Replication Status Idle: TIMEOUT")

        changes_1 = client.get_changes(url=sg_url, db=sg_db, since=last_seq, auth=seth_auth, skip_user_docs=True)
        last_seq = changes_1["last_seq"]

        # break when expected rev shows up in changes feed
        if changes_1["results"] and changes_1["results"][0]["changes"][0]["rev"].startswith("6-"):
            break

    assert len(changes_1["results"]) == 1
    assert changes_1["results"][0]["id"] == "test_doc"
    assert changes_1["results"][0]["changes"][0]["rev"].startswith("6-")

    # Create a conflict off of rev one
    rev_gen_2_doc_conflict = client.add_conflict(
        url=sg_url,
        db=sg_db,
        doc_id=rev_gen_1_doc["id"],
        parent_revisions=rev_gen_1_doc["rev"],
        new_revision="2-foo",
        auth=seth_auth
    )
    assert rev_gen_2_doc_conflict["id"] == "test_doc"
    assert rev_gen_2_doc_conflict["rev"] == "2-foo"

    # Issue changes since changes_1 last_seq above
    changes_2 = client.get_changes(url=sg_url, db=sg_db, since=changes_1["last_seq"], auth=seth_auth)
    assert len(changes_2["results"]) == 1
    assert changes_2["results"][0]["id"] == "test_doc"
    assert changes_2["results"][0]["changes"][0]["rev"].startswith("6-")

    # Issue changes since 0, strip user doc and make sure the doc is still the '6-' rev
    changes_from_0_one = client.get_changes(url=sg_url, db=sg_db, since=0, auth=seth_auth, skip_user_docs=True)
    assert len(changes_from_0_one["results"]) == 1
    assert changes_from_0_one["results"][0]["id"] == "test_doc"
    assert changes_from_0_one["results"][0]["changes"][0]["rev"].startswith("6-")

    # Create a 3-foo rev with 2-foo as the parent
    rev_gen_3_doc_conflict = client.add_conflict(
        url=sg_url,
        db=sg_db,
        doc_id=rev_gen_2_doc_conflict["id"],
        parent_revisions=rev_gen_2_doc_conflict["rev"],
        new_revision="3-foo",
        auth=seth_auth
    )
    assert rev_gen_3_doc_conflict["id"] == "test_doc"
    assert rev_gen_3_doc_conflict["rev"] == "3-foo"

    # Issue changes since changes_2 last_seq above
    changes_3 = client.get_changes(url=sg_url, db=sg_db, since=changes_2["last_seq"], auth=seth_auth)
    assert len(changes_3["results"]) == 1
    assert changes_3["results"][0]["id"] == "test_doc"
    assert changes_3["results"][0]["changes"][0]["rev"].startswith("6-")

    # Issue changes since 0, strip user doc and make sure the doc is still the '6-' rev
    changes_from_0_two = client.get_changes(url=sg_url, db=sg_db, since=0, auth=seth_auth, skip_user_docs=True)
    assert len(changes_from_0_two["results"]) == 1
    assert changes_from_0_two["results"][0]["id"] == "test_doc"
    assert changes_from_0_two["results"][0]["changes"][0]["rev"].startswith("6-")

    # Delete test_doc at rev 6-*; the conflicting 3-foo branch becomes the winner
    client.delete_doc(url=sg_url, db=sg_db, doc_id=rev_gen_6_doc["id"], rev=rev_gen_6_doc["rev"], auth=seth_auth)

    # Issue changes since changes_3 last_seq above
    changes_4 = client.get_changes(url=sg_url, db=sg_db, since=changes_3["last_seq"], auth=seth_auth)
    assert len(changes_4["results"]) == 1
    assert changes_4["results"][0]["id"] == "test_doc"
    assert changes_4["results"][0]["changes"][0]["rev"] == "3-foo"

    # Issue a oneshot changes since changes_4 last_seq and assert no results are returned
    changes_5 = client.get_changes(url=sg_url, db=sg_db, since=changes_4["last_seq"], feed="normal", auth=seth_auth)
    assert len(changes_5["results"]) == 0
# NOTE(review): a second `def test_winning_conflict_branch_revisions` appears
# later in this file and shadows this definition at import time, so pytest will
# only collect the later copy — confirm which version is intended.
def test_winning_conflict_branch_revisions(params_from_base_test_setup, sg_conf_name):
    """
    Add winning conflict revisions to the revision tree and
    ensure that the changes feed returns the correct revisions

    Steps:
    - Create a doc ('test_doc')
    - Add 5 revs to 'test_doc'
    - POST _changes, assert rev starts with "6-" from 0, store "last_seq_1"
    - Create a conflict off first revision ("2-foo") (POST docs, new_edits == false)
    - Append 5 revisions to the conflicting branch
        (3-foo with 2-foo as parent, 4-foo with 3-foo as parent ... 7-foo with 6-foo as parent)
    - GET 'test_doc' and verify that the rev is '7-foo'
    - POST _changes, assert returns 7-foo
    """
    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    c = cluster.Cluster(cluster_config)
    c.reset(sg_conf)

    client = MobileRestClient()

    seth_user_info = userinfo.UserInfo(
        name="seth",
        password="******",
        channels=["NATGEO"],
        roles=[]
    )

    seth_auth = client.create_user(
        url=sg_admin_url,
        db=sg_db,
        name=seth_user_info.name,
        password=seth_user_info.password,
        channels=seth_user_info.channels
    )

    test_doc_body = document.create_doc(doc_id="test_doc", channels=seth_user_info.channels)
    rev_gen_1_doc = client.add_doc(url=sg_url, db=sg_db, doc=test_doc_body, auth=seth_auth)

    # 5 updates on top of rev 1 produce a generation-6 winning revision
    rev_gen_6_doc = client.update_doc(url=sg_url, db=sg_db, doc_id=rev_gen_1_doc["id"], number_updates=5, auth=seth_auth)
    assert rev_gen_6_doc["rev"].startswith("6-")

    # Wait until doc shows up in changes feed
    last_seq = 0
    start = time.time()
    while True:
        if time.time() - start > keywords.constants.CLIENT_REQUEST_TIMEOUT:
            raise keywords.exceptions.TimeoutError("Wait for Replication Status Idle: TIMEOUT")

        changes_1 = client.get_changes(url=sg_url, db=sg_db, since=last_seq, auth=seth_auth, skip_user_docs=True)
        last_seq = changes_1["last_seq"]
        if len(changes_1["results"]) > 0 and changes_1["results"][0]["changes"][0]["rev"].startswith("6-"):
            break

    assert len(changes_1["results"]) == 1
    assert changes_1["results"][0]["id"] == "test_doc"
    assert changes_1["results"][0]["changes"][0]["rev"].startswith("6-")

    # Create a conflict off of rev one
    rev_gen_1_doc_conflict = client.add_conflict(
        url=sg_url,
        db=sg_db,
        doc_id=rev_gen_1_doc["id"],
        parent_revisions=rev_gen_1_doc["rev"],
        new_revision="2-foo",
        auth=seth_auth
    )

    # Update the conflicting branch 5x: 3-foo..7-foo, each with the previous as parent
    rev_gen = 3
    for _ in range(5):
        rev_gen_1_doc_conflict = client.add_conflict(
            url=sg_url,
            db=sg_db,
            doc_id=rev_gen_1_doc["id"],
            parent_revisions=rev_gen_1_doc_conflict["rev"],
            new_revision="{}-foo".format(rev_gen),
            auth=seth_auth
        )
        rev_gen += 1

    # Wait until doc shows up in changes feed from last_seq from where last changes loop from above left off
    start = time.time()
    while True:
        if time.time() - start > keywords.constants.CLIENT_REQUEST_TIMEOUT:
            raise keywords.exceptions.TimeoutError("Wait for Replication Status Idle: TIMEOUT")

        changes_2 = client.get_changes(url=sg_url, db=sg_db, since=last_seq, auth=seth_auth)
        last_seq = changes_2["last_seq"]
        if len(changes_2["results"]) > 0:
            break

    # Verify that the "7-foo" rev is returned on the changes feed
    assert len(changes_2["results"]) == 1
    assert changes_2["results"][0]["id"] == "test_doc"
    assert changes_2["results"][0]["changes"][0]["rev"] == "7-foo"
def test_non_winning_revisions(params_from_base_test_setup, sg_conf_name):
    """
    Add non-winning revisions to the revision tree and ensure
    that the changes feed returns the correct revisions

    Steps:
    - Add a doc
    - Add 5 revs
    - changes, assert rev starts with "6-" from 0, store "last_seq_1"
    - Create a conflict off first revision ("2-foo") (POST docs, new_edits == false)
    - changes, assert rev starts with "6-" from "last_seq_1", store "last_seq_2"
    - changes, assert rev starts with "6-" from 0
    - Add a "3-foo" rev with rev "2-foo" as parent
    - changes, assert rev starts with "6-" from "last_seq_2", store "last_seq_3"
    - changes, assert rev starts with "6-" from 0
    - add tombstone rev as child of "6-" i.e. issue delete on ("6-")
    - changes, assert rev starts with "3-foo" from "last_seq_3"
    - changes, assert rev starts with "3-foo" from 0
    """
    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]
    no_conflicts_enabled = params_from_base_test_setup["no_conflicts_enabled"]

    # This test intentionally creates conflicting branches, which SG rejects
    # when running in no-conflicts mode
    if no_conflicts_enabled:
        pytest.skip('--no-conflicts is enabled, this test needs to create conflicts, so skipping the test')

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    c = cluster.Cluster(cluster_config)
    c.reset(sg_conf)

    client = MobileRestClient()

    seth_user_info = userinfo.UserInfo(name="seth", password="******", channels=["NATGEO"], roles=[])

    seth_auth = client.create_user(url=sg_admin_url, db=sg_db, name=seth_user_info.name,
                                   password=seth_user_info.password, channels=seth_user_info.channels)

    test_doc_body = document.create_doc(doc_id="test_doc", channels=seth_user_info.channels)
    rev_gen_1_doc = client.add_doc(url=sg_url, db=sg_db, doc=test_doc_body, auth=seth_auth)

    # 5 updates on top of rev 1 produce a generation-6 winning revision
    rev_gen_6_doc = client.update_doc(url=sg_url, db=sg_db, doc_id=rev_gen_1_doc["id"], number_updates=5, auth=seth_auth)
    assert rev_gen_6_doc["rev"].startswith("6-")

    # Get changes until rev generation 6 document shows up
    start = time.time()
    last_seq = 0
    while True:
        if time.time() - start > keywords.constants.CLIENT_REQUEST_TIMEOUT:
            raise keywords.exceptions.TimeoutError("Wait for Replication Status Idle: TIMEOUT")

        changes_1 = client.get_changes(url=sg_url, db=sg_db, since=last_seq, auth=seth_auth, skip_user_docs=True)
        last_seq = changes_1["last_seq"]

        # break when expected rev shows up in changes feed
        if changes_1["results"] and changes_1["results"][0]["changes"][0]["rev"].startswith("6-"):
            break

    assert len(changes_1["results"]) == 1
    assert changes_1["results"][0]["id"] == "test_doc"
    assert changes_1["results"][0]["changes"][0]["rev"].startswith("6-")

    # Create a conflict off of rev one
    rev_gen_2_doc_conflict = client.add_conflict(
        url=sg_url,
        db=sg_db,
        doc_id=rev_gen_1_doc["id"],
        parent_revisions=rev_gen_1_doc["rev"],
        new_revision="2-foo",
        auth=seth_auth)
    assert rev_gen_2_doc_conflict["id"] == "test_doc"
    assert rev_gen_2_doc_conflict["rev"] == "2-foo"

    # Issue changes since changes_1 last_seq above
    changes_2 = client.get_changes(url=sg_url, db=sg_db, since=changes_1["last_seq"], auth=seth_auth)
    assert len(changes_2["results"]) == 1
    assert changes_2["results"][0]["id"] == "test_doc"
    assert changes_2["results"][0]["changes"][0]["rev"].startswith("6-")

    # Issue changes since 0, strip user doc and make sure the doc is still the '6-' rev
    changes_from_0_one = client.get_changes(url=sg_url, db=sg_db, since=0, auth=seth_auth, skip_user_docs=True)
    assert len(changes_from_0_one["results"]) == 1
    assert changes_from_0_one["results"][0]["id"] == "test_doc"
    assert changes_from_0_one["results"][0]["changes"][0]["rev"].startswith("6-")

    # Create a 3-foo rev with 2-foo as the parent
    rev_gen_3_doc_conflict = client.add_conflict(
        url=sg_url,
        db=sg_db,
        doc_id=rev_gen_2_doc_conflict["id"],
        parent_revisions=rev_gen_2_doc_conflict["rev"],
        new_revision="3-foo",
        auth=seth_auth)
    assert rev_gen_3_doc_conflict["id"] == "test_doc"
    assert rev_gen_3_doc_conflict["rev"] == "3-foo"

    # Issue changes since changes_2 last_seq above
    changes_3 = client.get_changes(url=sg_url, db=sg_db, since=changes_2["last_seq"], auth=seth_auth)
    assert len(changes_3["results"]) == 1
    assert changes_3["results"][0]["id"] == "test_doc"
    assert changes_3["results"][0]["changes"][0]["rev"].startswith("6-")

    # Issue changes since 0, strip user doc and make sure the doc is still the '6-' rev
    changes_from_0_two = client.get_changes(url=sg_url, db=sg_db, since=0, auth=seth_auth, skip_user_docs=True)
    assert len(changes_from_0_two["results"]) == 1
    assert changes_from_0_two["results"][0]["id"] == "test_doc"
    assert changes_from_0_two["results"][0]["changes"][0]["rev"].startswith("6-")

    # Delete test_doc at rev 6-*; the conflicting 3-foo branch becomes the winner
    client.delete_doc(url=sg_url, db=sg_db, doc_id=rev_gen_6_doc["id"], rev=rev_gen_6_doc["rev"], auth=seth_auth)

    # Issue changes since changes_3 last_seq above
    changes_4 = client.get_changes(url=sg_url, db=sg_db, since=changes_3["last_seq"], auth=seth_auth)
    assert len(changes_4["results"]) == 1
    assert changes_4["results"][0]["id"] == "test_doc"
    assert changes_4["results"][0]["changes"][0]["rev"] == "3-foo"

    # Issue a oneshot changes since changes_4 last_seq and assert no results are returned
    changes_5 = client.get_changes(url=sg_url, db=sg_db, since=changes_4["last_seq"], feed="normal", auth=seth_auth)
    assert len(changes_5["results"]) == 0
def test_winning_conflict_branch_revisions(params_from_base_test_setup, sg_conf_name):
    """
    Add winning conflict revisions to the revision tree and
    ensure that the changes feed returns the correct revisions

    Steps:
    - Add a doc ('test_doc')
    - Add 5 revs to 'test_doc'
    - POST _changes, assert rev starts with "6-" from 0, store "last_seq_1"
    - Create a conflict off first revision ("2-foo") (POST docs, new_edits == false)
    - Append 5 revisions to the conflicting branch
        (3-foo with 2-foo as parent, 4-foo with 3-foo as parent ... 7-foo with 6-foo as parent)
    - GET 'test_doc' and verify that the rev is '7-foo'
    - POST _changes, assert returns 7-foo
    """
    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]
    no_conflicts_enabled = params_from_base_test_setup["no_conflicts_enabled"]

    # This test intentionally creates conflicting branches, which SG rejects
    # when running in no-conflicts mode
    if no_conflicts_enabled:
        pytest.skip('--no-conflicts is enabled, this test needs to create conflicts, so skipping the test')

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    c = cluster.Cluster(cluster_config)
    c.reset(sg_conf)

    client = MobileRestClient()

    seth_user_info = userinfo.UserInfo(name="seth", password="******", channels=["NATGEO"], roles=[])

    seth_auth = client.create_user(url=sg_admin_url, db=sg_db, name=seth_user_info.name,
                                   password=seth_user_info.password, channels=seth_user_info.channels)

    test_doc_body = document.create_doc(doc_id="test_doc", channels=seth_user_info.channels)
    rev_gen_1_doc = client.add_doc(url=sg_url, db=sg_db, doc=test_doc_body, auth=seth_auth)

    # 5 updates on top of rev 1 produce a generation-6 winning revision
    rev_gen_6_doc = client.update_doc(url=sg_url, db=sg_db, doc_id=rev_gen_1_doc["id"], number_updates=5, auth=seth_auth)
    assert rev_gen_6_doc["rev"].startswith("6-")

    # Wait until doc shows up in changes feed
    last_seq = 0
    start = time.time()
    while True:
        if time.time() - start > keywords.constants.CLIENT_REQUEST_TIMEOUT:
            raise keywords.exceptions.TimeoutError("Wait for Replication Status Idle: TIMEOUT")

        changes_1 = client.get_changes(url=sg_url, db=sg_db, since=last_seq, auth=seth_auth, skip_user_docs=True)
        last_seq = changes_1["last_seq"]
        if len(changes_1["results"]) > 0 and changes_1["results"][0]["changes"][0]["rev"].startswith("6-"):
            break

    assert len(changes_1["results"]) == 1
    assert changes_1["results"][0]["id"] == "test_doc"
    assert changes_1["results"][0]["changes"][0]["rev"].startswith("6-")

    # Create a conflict off of rev one
    rev_gen_1_doc_conflict = client.add_conflict(
        url=sg_url,
        db=sg_db,
        doc_id=rev_gen_1_doc["id"],
        parent_revisions=rev_gen_1_doc["rev"],
        new_revision="2-foo",
        auth=seth_auth)

    # Update the conflicting branch 5x: 3-foo..7-foo, each with the previous as parent
    rev_gen = 3
    for _ in range(5):
        rev_gen_1_doc_conflict = client.add_conflict(
            url=sg_url,
            db=sg_db,
            doc_id=rev_gen_1_doc["id"],
            parent_revisions=rev_gen_1_doc_conflict["rev"],
            new_revision="{}-foo".format(rev_gen),
            auth=seth_auth)
        rev_gen += 1

    # Wait until doc shows up in changes feed from last_seq from where last changes loop from above left off
    start = time.time()
    while True:
        if time.time() - start > keywords.constants.CLIENT_REQUEST_TIMEOUT:
            raise keywords.exceptions.TimeoutError("Wait for Replication Status Idle: TIMEOUT")

        changes_2 = client.get_changes(url=sg_url, db=sg_db, since=last_seq, auth=seth_auth)
        last_seq = changes_2["last_seq"]
        # BUGFIX: check the batch is non-empty BEFORE indexing into it. The
        # previous condition evaluated changes_2["results"][0] first, which
        # raised IndexError on an empty changes batch instead of polling again.
        if len(changes_2["results"]) > 0 and changes_2["results"][0]["changes"][0]["rev"] == "7-foo":
            break

    # Verify that the "7-foo" rev is returned on the changes feed
    assert len(changes_2["results"]) == 1
    assert changes_2["results"][0]["id"] == "test_doc"
    assert changes_2["results"][0]["changes"][0]["rev"] == "7-foo"
def test_attachment_revpos_when_ancestor_unavailable_active_revision_doesnt_share_ancestor(params_from_base_test_setup, sg_conf_name):
    """
    Creates a document with an attachment, then updates that document so that
    the body of the revision that originally pushed the document is no
    longer available.  Add a new revision that's not a child of the
    active revision, and validate that it's uploaded successfully.
    Example:
    1. Document is created with no attachment at rev-1
    2. Server adds revision with attachment at rev-2 {"hello.txt", revpos=2}
    3. Document is updated multiple times on the server, goes to rev-4
    4. Client attempts to add a new (conflicting) revision 3a, with ancestors rev-2a (with its own attachment), rev-1.
    5. When client attempts to push rev-3a with attachment stub {"hello.txt", revpos=2}.  Should throw an error, since the revpos
    of the attachment is later than the common ancestor (rev-1)
    """
    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    cluster_helper.reset_cluster(cluster_config, sg_conf)

    topology = cluster_helper.get_cluster_topology(cluster_config)

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    bucket = "data-bucket"

    log_info("Running 'test_attachment_revpos_when_ancestor_unavailable_active_revision_doesnt_share_ancestor'")
    log_info("Using cbs_url: {}".format(cbs_url))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_url_admin: {}".format(sg_url_admin))
    log_info("Using sg_db: {}".format(sg_db))
    log_info("Using bucket: {}".format(bucket))

    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC"]

    client = MobileRestClient()

    client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)

    doc = document.create_doc(doc_id="doc_1", content={"sample_key": "sample_val"}, channels=sg_user_channels)
    doc_gen_1 = client.add_doc(url=sg_url, db=sg_db, doc=doc, auth=sg_user_session)

    # rev 2 attaches a file (revpos=2); revs 3 and 4 are plain updates
    client.update_doc(url=sg_url, db=sg_db, doc_id=doc_gen_1["id"], attachment_name="sample_text.txt", auth=sg_user_session)
    client.update_doc(url=sg_url, db=sg_db, doc_id=doc_gen_1["id"], auth=sg_user_session)
    client.update_doc(url=sg_url, db=sg_db, doc_id=doc_gen_1["id"], auth=sg_user_session)

    # Conflicting branch rooted at rev-1 via an invented "2-foo2" ancestor
    parent_rev_list = ["2-foo2", doc_gen_1["rev"]]

    # Sync Gateway should error since it has no references attachment in its ancestors
    with pytest.raises(HTTPError) as he:
        client.add_conflict(
            url=sg_url,
            db=sg_db,
            doc_id=doc_gen_1["id"],
            parent_revisions=parent_rev_list,
            new_revision="3-foo3",
            auth=sg_user_session
        )
    assert he.value[0].startswith("400 Client Error: Bad Request for url: ")
def test_stale_revision_should_not_be_in_the_index(setup_client_syncgateway_test):
    """original ticket: https://github.com/couchbase/couchbase-lite-android/issues/855

    scenario:
    1. Running sync_gateway
    2. Create database and starts both push and pull replicators through client REST API
    3. Create two or more views through client REST API
    4. Add doc, and verify doc is index with current revision through client REST API
    5. Make sure document is pushed to sync gateway through sync gateway REST API
    6. Update doc with sync gateway (not client side) through sync gateway REST API
    7. Make sure updated document is pull replicated to client through client REST API
    8. Make sure updated document is indexed through client REST API
    9. Make sure stale revision is deleted from index.  through client REST API
    10. Pass criteria
    """
    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_url = setup_client_syncgateway_test["sg_url"]
    sg_admin_url = setup_client_syncgateway_test["sg_admin_url"]

    num_docs = 10
    num_revs = 100
    d_doc_name = "dd"
    sg_db = "db"
    sg_user_name = "sg_user"

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_url,
        config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_stale_revision_should_not_be_in_the_index'")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_admin_url: {}".format(sg_admin_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revs: {}".format(num_revs))

    client = MobileRestClient()

    sg_user_channels = ["NBC"]
    client.create_user(url=sg_admin_url, db=sg_db, name=sg_user_name, password="******", channels=sg_user_channels)
    sg_session = client.create_session(url=sg_admin_url, db=sg_db, name=sg_user_name)

    # Design doc with two views: content_view indexes docs that have a
    # 'content' property; update_view indexes by the 'updates' counter
    view = """{
    "language" : "javascript",
    "views" : {
        "content_view" : {
            "map" : "function(doc, meta) { if (doc.content) { emit(doc._id, doc._rev); } }"
        },
        "update_view" : {
            "map" : "function(doc, meta) { emit(doc.updates, null); }"
        }
    }
}"""

    ls_db = client.create_database(url=ls_url, name="ls_db")

    # Setup continuous push / pull replication from ls_db1 to sg_db
    client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_admin_url, to_db=sg_db
    )

    client.start_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_admin_url, from_db=sg_db,
        to_db=ls_db
    )

    design_doc_id = client.add_design_doc(url=ls_url, db=ls_db, name=d_doc_name, doc=view)
    client.get_doc(url=ls_url, db=ls_db, doc_id=design_doc_id)

    doc_body = document.create_doc(doc_id="doc_1", content={"hi": "I should be in the view"}, channels=sg_user_channels)
    log_info(doc_body)

    doc_body_2 = document.create_doc(doc_id="doc_2", channels=sg_user_channels)

    doc = client.add_doc(url=ls_url, db=ls_db, doc=doc_body)
    doc_2 = client.add_doc(url=ls_url, db=ls_db, doc=doc_body_2)

    # Only doc_1 has 'content', so content_view should hold exactly one row;
    # update_view emits for both docs
    content_view_rows = client.get_view(url=ls_url, db=ls_db, design_doc_id=design_doc_id, view_name="content_view")
    client.verify_view_row_num(view_response=content_view_rows, expected_num_rows=1)

    update_view_rows = client.get_view(url=ls_url, db=ls_db, design_doc_id=design_doc_id, view_name="update_view")
    client.verify_view_row_num(view_response=update_view_rows, expected_num_rows=2)

    expected_docs_list = [doc, doc_2]
    client.verify_docs_present(url=sg_url, db=sg_db, expected_docs=expected_docs_list, auth=sg_session)

    # Update doc_1 on the Sync Gateway side, then wait for the new revision
    # to pull-replicate back to the client database
    updated_doc = client.update_doc(url=sg_url, db=sg_db, doc_id=doc["id"], number_updates=10, auth=sg_session)
    client.verify_docs_present(url=ls_url, db=ls_db, expected_docs=updated_doc)

    # The view must index only the latest revision — still one row, keyed by
    # doc_1's id and valued with the updated rev (no stale revision rows)
    content_view_rows_2 = client.get_view(url=ls_url, db=ls_db, design_doc_id=design_doc_id, view_name="content_view")
    client.verify_view_row_num(view_response=content_view_rows_2, expected_num_rows=1)

    client.verify_view_contains_keys(view_response=content_view_rows_2, keys=doc["id"])
    client.verify_view_contains_values(view_response=content_view_rows_2, values=updated_doc["rev"])
def test_removing_expiry(params_from_base_test_setup, sg_conf_name):
    """
    1. PUT /db/doc1 via SG with property "_exp":3
    2. Update /db/doc1 with a new revision with no expiry value
    3. After 10 updates, update /db/doc1 with a revision with no expiry
    """

    cluster_conf = params_from_base_test_setup["cluster_config"]
    sg_mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, sg_mode)

    cluster_helper = ClusterKeywords()
    cluster_topology = cluster_helper.get_cluster_topology(cluster_conf)
    cluster_helper.reset_cluster(cluster_config=cluster_conf, sync_gateway_config=sg_conf)

    cbs_url = cluster_topology["couchbase_servers"][0]
    first_sg = cluster_topology["sync_gateways"][0]
    sg_url = first_sg["public"]
    sg_url_admin = first_sg["admin"]

    log_info("Running 'test_removing_expiry'")
    log_info("cbs_url: {}".format(cbs_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC", "ABC"]

    rest_client = MobileRestClient()
    rest_client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
    user_session = rest_client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)

    # One doc with a 3 second expiry (will be updated), one control doc
    # with a 10 second expiry that is never touched.
    exp_3_body = document.create_doc(doc_id="exp_3", expiry=3, channels=sg_user_channels)
    exp_10_body = document.create_doc(doc_id="exp_10", expiry=10, channels=sg_user_channels)

    exp_3_doc = rest_client.add_doc(url=sg_url, db=sg_db, doc=exp_3_body, auth=user_session)
    exp_10_doc = rest_client.add_doc(url=sg_url, db=sg_db, doc=exp_10_body, auth=user_session)

    # Each of the 10 updates writes a revision without an expiry property,
    # which should clear the TTL set on the initial revision.
    exp_3_updated = rest_client.update_doc(url=sg_url, db=sg_db, doc_id=exp_3_doc["id"], number_updates=10, auth=user_session)

    # Sleep should allow an expiry to happen on doc_exp_3 if it had not been removed.
    # Expected behavior is that the doc_exp_3 will still be around due to the removal of the expiry
    time.sleep(5)

    # doc_exp_3 should no longer have an expiry and should not raise an exception
    exp_3_result = rest_client.get_doc(url=sg_url, db=sg_db, doc_id=exp_3_updated["id"], auth=user_session)
    assert exp_3_result["_id"] == "exp_3"

    # doc_exp_10 should be available still and should not raise an exception
    exp_10_result = rest_client.get_doc(url=sg_url, db=sg_db, doc_id=exp_10_doc["id"], auth=user_session)
    assert exp_10_result["_id"] == "exp_10"
def test_attachment_revpos_when_ancestor_unavailable_active_revision_doesnt_share_ancestor(
        params_from_base_test_setup, sg_conf_name):
    """
    Creates a document with an attachment, then updates that document so that
    the body of the revision that originally pushed the document is no
    longer available.  Add a new revision that's not a child of the
    active revision, and validate that it's uploaded successfully.
    Example:
    1. Document is created with no attachment at rev-1
    2. Server adds revision with attachment at rev-2 {"hello.txt", revpos=2}
    3. Document is updated multiple times on the server, goes to rev-4
    4. Client attempts to add a new (conflicting) revision 3a, with ancestors rev-2a (with it's own attachment), rev-1.
    5. When client attempts to push rev-3a with attachment stub {"hello.txt", revpos=2}. Should throw an error, since the revpos of the attachment is later than the common ancestor (rev-1)
    """
    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    no_conflicts_enabled = params_from_base_test_setup["no_conflicts_enabled"]

    # This test creates a conflicting branch, which is impossible in
    # no-conflicts mode.  (Fixed: skip message used to say "is not enabled",
    # contradicting the condition.)
    if no_conflicts_enabled:
        pytest.skip('--no-conflicts is enabled, so skipping the test')

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    cluster_helper.reset_cluster(cluster_config, sg_conf)

    topology = cluster_helper.get_cluster_topology(cluster_config)

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    bucket = "data-bucket"

    log_info(
        "Running 'test_attachment_revpos_when_ancestor_unavailable_active_revision_doesnt_share_ancestor'"
    )
    log_info("Using cbs_url: {}".format(cbs_url))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_url_admin: {}".format(sg_url_admin))
    log_info("Using sg_db: {}".format(sg_db))
    log_info("Using bucket: {}".format(bucket))

    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC"]

    client = MobileRestClient()

    client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)

    # rev-1: no attachment
    doc = document.create_doc(doc_id="doc_1", content={"sample_key": "sample_val"}, channels=sg_user_channels)
    doc_gen_1 = client.add_doc(url=sg_url, db=sg_db, doc=doc, auth=sg_user_session)
    # rev-2: attachment added; rev-3 / rev-4: plain updates
    client.update_doc(url=sg_url, db=sg_db, doc_id=doc_gen_1["id"], attachment_name="sample_text.txt", auth=sg_user_session)
    client.update_doc(url=sg_url, db=sg_db, doc_id=doc_gen_1["id"], auth=sg_user_session)
    client.update_doc(url=sg_url, db=sg_db, doc_id=doc_gen_1["id"], auth=sg_user_session)

    # Conflicting branch rooted at rev-1 (common ancestor), via fabricated rev "2-foo2"
    parent_rev_list = ["2-foo2", doc_gen_1["rev"]]

    # Sync Gateway should error since it has no references attachment in its ancestors
    with pytest.raises(HTTPError) as he:
        client.add_conflict(
            url=sg_url,
            db=sg_db,
            doc_id=doc_gen_1["id"],
            parent_revisions=parent_rev_list,
            new_revision="3-foo3",
            auth=sg_user_session
        )
    # Fixed: was `he.value[0]`, which is inconsistent with every other
    # HTTPError assertion in this module and unsupported on Python 3.
    assert he.value.message.startswith("400 Client Error: Bad Request for url: ")
def test_stale_revision_should_not_be_in_the_index(setup_client_syncgateway_test):
    """original ticket: https://github.com/couchbase/couchbase-lite-android/issues/855

    scenario:
    1. Running sync_gateway
    2. Create database and starts both push and pull replicators through client REST API
    3. Create two or more views through client REST API
    4. Add doc, and verify doc is index with current revision through client REST API
    5. Make sure document is pushed to sync gateway through sync gateway REST API
    6. Update doc with sync gateway (not client side) through sync gateway REST API
    7. Make sure updated document is pull replicated to client through client REST API
    8. Make sure updated document is indexed through client REST API
    9. Make sure stale revision is deleted from index. through client REST API
    10. Pass criteria

    NOTE(review): this redefines a function of the same name earlier in the
    module; this later definition is the one pytest collects. This variant
    resets the cluster via cluster.Cluster and queries views with
    design_doc_name=... rather than design_doc_id=....
    """
    cluster_config = setup_client_syncgateway_test["cluster_config"]
    sg_mode = setup_client_syncgateway_test["sg_mode"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_url = setup_client_syncgateway_test["sg_url"]
    sg_admin_url = setup_client_syncgateway_test["sg_admin_url"]

    num_docs = 10
    num_revs = 100

    d_doc_name = "dd"
    sg_db = "db"
    sg_user_name = "sg_user"

    # Full cluster reset with the listener test config for this mode
    sg_config = sync_gateway_config_path_for_mode("listener_tests/listener_tests", sg_mode)
    c = cluster.Cluster(config=cluster_config)
    c.reset(sg_config_path=sg_config)

    log_info("Running 'test_stale_revision_should_not_be_in_the_index'")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_admin_url: {}".format(sg_admin_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revs: {}".format(num_revs))

    client = MobileRestClient()

    sg_user_channels = ["NBC"]
    client.create_user(url=sg_admin_url, db=sg_db, name=sg_user_name, password="******", channels=sg_user_channels)
    sg_session = client.create_session(url=sg_admin_url, db=sg_db, name=sg_user_name)

    # Design doc with two views: content_view emits (doc id, rev) for docs that
    # have a "content" property; update_view emits for every doc.
    view = """{
    "language" : "javascript",
    "views" : {
        "content_view" : {
            "map" : "function(doc, meta) { if (doc.content) { emit(doc._id, doc._rev); } }"
        },
        "update_view" : {
            "map" : "function(doc, meta) { emit(doc.updates, null); }"
        }
    }
}"""

    ls_db = client.create_database(url=ls_url, name="ls_db")

    # Setup continuous push / pull replication from ls_db1 to sg_db
    client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_admin_url,
        to_db=sg_db
    )
    client.start_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_admin_url,
        from_db=sg_db,
        to_db=ls_db
    )

    design_doc_id = client.add_design_doc(url=ls_url, db=ls_db, name=d_doc_name, doc=view)
    client.get_doc(url=ls_url, db=ls_db, doc_id=design_doc_id)

    # doc_1 has "content" so it should land in content_view; doc_2 should not
    doc_body = document.create_doc(doc_id="doc_1", content={"hi": "I should be in the view"}, channels=sg_user_channels)
    log_info(doc_body)
    doc_body_2 = document.create_doc(doc_id="doc_2", channels=sg_user_channels)

    doc = client.add_doc(url=ls_url, db=ls_db, doc=doc_body)
    doc_2 = client.add_doc(url=ls_url, db=ls_db, doc=doc_body_2)

    content_view_rows = client.get_view(url=ls_url, db=ls_db, design_doc_name=d_doc_name, view_name="content_view")
    client.verify_view_row_num(view_response=content_view_rows, expected_num_rows=1)

    update_view_rows = client.get_view(url=ls_url, db=ls_db, design_doc_name=d_doc_name, view_name="update_view")
    client.verify_view_row_num(view_response=update_view_rows, expected_num_rows=2)

    # Both docs should push to sync_gateway via the continuous replication
    expected_docs_list = [doc, doc_2]
    client.verify_docs_present(url=sg_url, db=sg_db, expected_docs=expected_docs_list, auth=sg_session)

    # Update on the sync_gateway side, then wait for the pull replication to
    # bring the new revision back to the client
    updated_doc = client.update_doc(url=sg_url, db=sg_db, doc_id=doc["id"], number_updates=10, auth=sg_session)
    client.verify_docs_present(url=ls_url, db=ls_db, expected_docs=updated_doc)

    # The view must contain exactly one row for doc_1, keyed by its id and
    # carrying the *updated* rev — i.e. the stale revision was dropped
    content_view_rows_2 = client.get_view(url=ls_url, db=ls_db, design_doc_name=d_doc_name, view_name="content_view")
    client.verify_view_row_num(view_response=content_view_rows_2, expected_num_rows=1)
    client.verify_view_contains_keys(view_response=content_view_rows_2, keys=doc["id"])
    client.verify_view_contains_values(view_response=content_view_rows_2, values=updated_doc["rev"])
def test_mobile_opt_in(params_from_base_test_setup, sg_conf_name):
    """
    Scenario: Enable mobile opt in sync function in sync-gateway configuration file
    - Check xattrs/mobile-opt-in_cc or di json files
    - 8 cases covered
    - doc : https://docs.google.com/document/d/1XxLIBsjuj_UxTTJs4Iu7C7uZdos8ZEzeckrVc17y3sw/edit
    - #1 Create doc via sdk with mobile opt in and verify doc is imported
    - #2 Create doc via sdk with mobile opt out and verify doc is not imported
    - #3 Create doc via sg with mobile opt in and update via sdk and verify doc is imported
    - #4 Create doc via sg with mobile opt out and update via sdk and verify doc is not imported
         - Try to update same doc via sg and verify 409 conflict error is thrown
         - Create a doc with same doc id and verify doc is created successfully
    - #5 Create doc via sg with mobile opt out and update via sdk which created no revisions
         - Now do sdk create with mobile opt in should import case #5
    - #6 Create doc via sg with mobile opt out and update via sdk with opt in
         - Verify type is overrided and doc is imported
    - #7 Create doc via sg with mobile opt in and update via sdk with opt out
         - Verify type is overrided and doc is not imported
    - #8 Disable import in the sg config and have mobile opt in function
         Create doc via sdk with mobile property and verify sg update succeeds
    - #9 Same config as #8 and have mobile opt in function in config
         Create doc via sdk without mobile property and create new doc via sg with same doc id and verify it succeeds
    """
    bucket_name = 'data-bucket'
    sg_db = 'db'

    cluster_conf = params_from_base_test_setup['cluster_config']
    cluster_topology = params_from_base_test_setup['cluster_topology']
    mode = params_from_base_test_setup['mode']
    xattrs_enabled = params_from_base_test_setup['xattrs_enabled']

    # This test should only run when using xattr meta storage
    if not xattrs_enabled:
        pytest.skip('XATTR tests require --xattrs flag')

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    sg_admin_url = cluster_topology['sync_gateways'][0]['admin']
    sg_url = cluster_topology['sync_gateways'][0]['public']
    cbs_url = cluster_topology['couchbase_servers'][0]

    log_info('sg_conf: {}'.format(sg_conf))
    log_info('sg_admin_url: {}'.format(sg_admin_url))
    log_info('sg_url: {}'.format(sg_url))
    log_info('cbs_url: {}'.format(cbs_url))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # Create clients: one through Sync Gateway's REST API, one direct SDK
    # connection to the Couchbase Server bucket
    sg_client = MobileRestClient()
    cbs_ip = host_for_url(cbs_url)
    sdk_client = Bucket('couchbase://{}/{}'.format(cbs_ip, bucket_name), password='******', timeout=SDK_TIMEOUT)

    # Create user / session
    auto_user_info = UserInfo(name='autotest', password='******', channels=['mobileOptIn'], roles=[])
    sg_client.create_user(
        url=sg_admin_url,
        db=sg_db,
        name=auto_user_info.name,
        password=auto_user_info.password,
        channels=auto_user_info.channels
    )

    test_auth_session = sg_client.create_session(
        url=sg_admin_url,
        db=sg_db,
        name=auto_user_info.name,
        password=auto_user_info.password
    )

    def update_mobile_prop():
        # Doc properties that opt the doc IN to mobile import
        return {
            'updates': 0,
            'type': 'mobile',
        }

    def update_non_mobile_prop():
        # Doc properties that opt the doc OUT of mobile import
        return {
            'updates': 0,
            'test': 'true',
            'type': 'mobile opt out',
        }

    # Create first doc via SDK with type mobile. Case #1
    doc_id1 = 'mobile_opt_in_sdk_doc'
    doc = document.create_doc(doc_id=doc_id1, channels=['mobileOptIn'], prop_generator=update_mobile_prop)
    sdk_client.upsert(doc_id1, doc)
    sg_get_doc1 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id1, auth=test_auth_session)
    assert sg_get_doc1['_rev'].startswith('1-') and sg_get_doc1['_id'] == doc_id1

    # Additional coverage for case #1
    sg_client.update_doc(url=sg_url, db=sg_db, doc_id=doc_id1, number_updates=1, auth=test_auth_session)
    sg_get_doc1 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id1, auth=test_auth_session)
    assert sg_get_doc1['_rev'].startswith('2-') and sg_get_doc1['_id'] == doc_id1

    # Create second doc via SDK with type non mobile. Case #2 — doc must NOT
    # be visible through Sync Gateway
    doc_id2 = 'mobile_opt_out_sdk_doc'
    doc = document.create_doc(doc_id=doc_id2, channels=['mobileOptIn'], prop_generator=update_non_mobile_prop)
    sdk_client.upsert(doc_id2, doc)
    with pytest.raises(HTTPError) as he:
        sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id2, auth=test_auth_session)
    log_info(he.value)
    assert he.value.message.startswith('404 Client Error: Not Found for url:')

    # Create third sg doc with mobile opt in and update via sdk. Case #3
    doc_id3 = 'mobile_opt_in_sg_doc'
    doc_body = document.create_doc(doc_id=doc_id3, channels=['mobileOptIn'], prop_generator=update_mobile_prop)
    doc = sg_client.add_doc(url=sg_url, db=sg_db, doc=doc_body, auth=test_auth_session)
    sg_get_doc3 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id3, auth=test_auth_session)
    sg_get_doc3["updated_sdk_via_sg"] = "1"
    sdk_client.upsert(doc_id3, sg_get_doc3)
    sg_get_doc3 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id3, auth=test_auth_session)
    assert sg_get_doc3['_rev'].startswith('2-') and sg_get_doc3['_id'] == doc_id3
    log_info("sg get doc3 is {}".format(sg_get_doc3))

    # Create fourth sg doc with mobile opt out and update via sdk. Case #4 and case #8
    doc_id4 = 'mobile_opt_out_sg_doc'
    doc_body = document.create_doc(doc_id=doc_id4, channels=['mobileOptIn'], prop_generator=update_non_mobile_prop)
    doc = sg_client.add_doc(url=sg_url, db=sg_db, doc=doc_body, auth=test_auth_session)

    # update via SDK
    sg_get_doc4 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id4, auth=test_auth_session)
    rev = sg_get_doc4['_rev']
    sg_get_doc4["updated_sdk_via_sg"] = "1"
    sdk_client.upsert(doc_id4, sg_get_doc4)
    with pytest.raises(HTTPError) as he:
        sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id4, auth=test_auth_session)
    log_info(he.value)
    assert he.value.message.startswith('404 Client Error: Not Found for url:')

    # update via SG — the SDK write above means SG's rev is stale, expect 409
    with pytest.raises(HTTPError) as he:
        sg_client.put_doc(url=sg_url, db=sg_db, doc_id=doc_id4, doc_body={'sg_rewrite': 'True'}, rev=rev, auth=test_auth_session)
    log_info(he.value)
    assert he.value.message.startswith('409 Client Error: Conflict for url:')

    # Create same doc again to verify there is not existing key error covers case #8
    doc_body = document.create_doc(doc_id=doc_id4, channels=['mobileOptIn'], prop_generator=update_non_mobile_prop)
    sg_get_doc4_1 = sg_client.add_doc(url=sg_url, db=sg_db, doc=doc_body, auth=test_auth_session)
    log_info("4th doc after recreate vis sg is {}".format(sg_get_doc4_1))
    assert sg_get_doc4_1['rev'].startswith('1-') and sg_get_doc4_1['id'] == doc_id4

    # Create Fifth sg doc with mobile opt in and delete doc which created no revisions i.e tombstone doc
    # Now do sdk create with mobile opt in should import case #5
    doc_id5 = 'mobile_sdk_recreate_no_activerev'
    doc_body = document.create_doc(doc_id=doc_id5, channels=['mobileOptIn'], prop_generator=update_mobile_prop)
    doc = sg_client.add_doc(url=sg_url, db=sg_db, doc=doc_body, auth=test_auth_session)
    rev = doc['rev']
    sg_client.delete_doc(url=sg_url, db=sg_db, doc_id=doc_id5, rev=rev, auth=test_auth_session)

    # At this point no active revisions for this doc, so now update via sdk with mobile opt in should be successful
    # in getting doc
    doc = document.create_doc(doc_id=doc_id5, channels=['mobileOptIn'], prop_generator=update_mobile_prop)
    sdk_client.upsert(doc_id5, doc)
    sg_get_doc5 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id5, auth=test_auth_session)
    log_info("sg get doc 5 is {}".format(sg_get_doc5))
    assert sg_get_doc5['_rev'].startswith('1-') and sg_get_doc5['_id'] == doc_id5

    # Create sixth sg doc with mobile opt out and update via sdk with opt in. Case #6
    doc_id6 = 'mobileoptout_sg_doc_sdkupdate_optin'
    doc_body = document.create_doc(doc_id=doc_id6, channels=['mobileOptIn'], prop_generator=update_non_mobile_prop)
    doc = sg_client.add_doc(url=sg_url, db=sg_db, doc=doc_body, auth=test_auth_session)
    sg_get_doc6 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id6, auth=test_auth_session)
    log_info("Sg sixth doc is {}".format(sg_get_doc6))
    sg_get_doc6["type"] = "mobile"
    sdk_client.upsert(doc_id6, sg_get_doc6)
    sg_get_doc6 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id6, auth=test_auth_session)
    assert sg_get_doc6['_rev'].startswith('2-') and sg_get_doc6['_id'] == doc_id6

    # Create seventh sg doc with mobile opt in and update via sdk with opt out. Case #7
    doc_id7 = 'mobileoptin_sg_doc_sdkupdate_optout'
    doc_body = document.create_doc(doc_id=doc_id7, channels=['mobileOptIn'], prop_generator=update_mobile_prop)
    doc = sg_client.add_doc(url=sg_url, db=sg_db, doc=doc_body, auth=test_auth_session)
    sg_get_doc7 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id7, auth=test_auth_session)
    # NOTE(review): log message says "sixth" but this is the seventh doc
    log_info("Sg sixth doc is {}".format(sg_get_doc7))
    sg_get_doc7["type"] = "mobile opt out"
    sdk_client.upsert(doc_id7, sg_get_doc7)
    with pytest.raises(HTTPError) as he:
        sg_get_doc7 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id7, auth=test_auth_session)
    log_info(he.value)
    assert he.value.message.startswith('404 Client Error: Not Found for url:')
    # TODO : verify _changes that it shows tombstone revisions -> it will happen on 2.0

    # Create eighth sdk doc with import disabled and add mobile property and update via sg. Case #8
    # (restart sync_gateway with a no-import config first)
    sg_conf_name = "xattrs/mobile_opt_in_no_import"
    sg_no_import_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    sg_util = SyncGateway()
    sg_util.start_sync_gateways(cluster_config=cluster_conf, url=sg_url, config=sg_no_import_conf)

    doc_id8 = 'mobile_opt_in_sg_rewrite_with_importdisabled'
    doc_body = document.create_doc(doc_id=doc_id8, channels=['mobileOptIn'], prop_generator=update_mobile_prop)
    sdk_client.upsert(doc_id8, doc_body)
    with pytest.raises(HTTPError) as he:
        sg_client.add_doc(url=sg_url, db=sg_db, doc=doc_body, auth=test_auth_session)
    log_info(he.value)
    assert he.value.message.startswith('409 Client Error: Conflict for url:')
    sg_client.update_doc(url=sg_url, db=sg_db, doc_id=doc_id8, number_updates=1, auth=test_auth_session)
    sg_get_doc8 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id8, auth=test_auth_session)
    assert sg_get_doc8['_rev'].startswith('2-') and sg_get_doc8['_id'] == doc_id8

    # Create ninth sdk doc with import disabled and add mobile property and update via sg. Case #9
    doc_id9 = 'mobile_opt_out_sg_rewrite_with_importdisabled'
    doc_body = document.create_doc(doc_id=doc_id9, channels=['mobileOptIn'], prop_generator=update_non_mobile_prop)
    sdk_client.upsert(doc_id9, doc_body)
    sg_client.add_doc(url=sg_url, db=sg_db, doc=doc_body, auth=test_auth_session)
    # sg_client.update_doc(url=sg_url, db=sg_db, doc_id=doc_id8, number_updates=1, auth=test_auth_session)
    sg_get_doc9 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id9, auth=test_auth_session)
    assert sg_get_doc9['_rev'].startswith('1-') and sg_get_doc9['_id'] == doc_id9
def test_auto_prune_listener_keeps_conflicts_sanity(setup_client_syncgateway_test):
    """
    1. Create db on LiteServ and add docs
    2. Create db on sync_gateway and add docs with the same id
    3. Create one shot push / pull replication
    4. Update LiteServ 50 times
    5. Assert that pruned conflict is still present
    6. Delete the current revision and check that a GET returns the old conflict as the current rev
    """
    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_url = setup_client_syncgateway_test["sg_url"]
    sg_admin_url = setup_client_syncgateway_test["sg_admin_url"]

    client = MobileRestClient()

    # Restart sync_gateway with the walrus (in-memory bucket) config
    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_url,
        config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_auto_prune_listener_keeps_conflicts_sanity' ...")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_admin_url: {}".format(sg_admin_url))

    num_docs = 1
    # NOTE(review): docstring says "50 times" but num_revs is 100 — confirm intent
    num_revs = 100
    sg_db = "db"
    ls_db = "ls_db"
    sg_user_name = "sg_user"
    sg_user_channels = ["NBC"]

    client.create_user(url=sg_admin_url, db=sg_db, name=sg_user_name, password="******", channels=sg_user_channels)
    sg_session = client.create_session(url=sg_admin_url, db=sg_db, name=sg_user_name)
    ls_db = client.create_database(url=ls_url, name=ls_db)

    # Create docs with same prefix to create conflicts when the dbs complete 1 shot replication
    ls_db_docs = client.add_docs(url=ls_url, db=ls_db, number=num_docs, id_prefix="doc", channels=sg_user_channels)
    assert len(ls_db_docs) == num_docs

    sg_db_docs = client.add_docs(
        url=sg_url, db=sg_db, number=num_docs, id_prefix="doc", channels=sg_user_channels, auth=sg_session
    )
    assert len(sg_db_docs) == num_docs

    # Setup one shot pull replication and wait for idle.
    client.start_replication(
        url=ls_url,
        continuous=False,
        from_url=sg_admin_url,
        from_db=sg_db,
        to_db=ls_db
    )
    client.wait_for_no_replications(url=ls_url)

    # There should now be a conflict on the client
    conflicting_revs = client.get_conflict_revs(url=ls_url, db=ls_db, doc=ls_db_docs[0])

    # Get the doc with conflict rev
    client.get_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"], rev=conflicting_revs[0])

    # Update doc past revs limit and make sure conflict is still available
    updated_doc = client.update_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"], number_updates=num_revs)
    client.get_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"], rev=conflicting_revs[0])

    # Delete doc and ensure that the conflict is now the current rev
    client.delete_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"], rev=updated_doc["rev"])
    current_doc = client.get_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"])
    assert current_doc["_rev"] == conflicting_revs[0]
def test_attachment_revpos_when_ancestor_unavailable(
        params_from_base_test_setup, sg_conf_name):
    """
    Creates a document with an attachment, then updates that document so that
    the body of the revision that originally pushed the document is no
    longer available.  Add a new revision that's not a child of the
    active revision, and validate that it's uploaded successfully.
    Example:
    1. Document is created with attachment at rev-1
    2. Document is updated (strip digests and length, only put revpos & stub) multiple times on the server, goes to rev-10
    3. Client attempts to add a new (conflicting) revision 2, with parent rev-1.
    4. If the body of rev-1 is no longer available on the server (temporary backup of revision has expired, and is no longer stored
       in the in-memory rev cache), we were throwing an error to client
       because we couldn't verify based on the _attachments property in rev-1.
    5. In this scenario, before returning error, we are now checking if the active revision has a common ancestor with the incoming revision.
       If so, we can validate any revpos values equal to or earlier than the common ancestor against the active revision
    """
    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    no_conflicts_enabled = params_from_base_test_setup["no_conflicts_enabled"]

    # This test pushes a conflicting revision, which is impossible in
    # no-conflicts mode.  (Fixed: skip message used to say "is not enabled",
    # contradicting the condition.)
    if no_conflicts_enabled:
        pytest.skip('--no-conflicts is enabled, so skipping the test')

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    cluster_helper.reset_cluster(cluster_config, sg_conf)

    topology = cluster_helper.get_cluster_topology(cluster_config)

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    bucket = "data-bucket"

    log_info("Running 'test_attachment_revpos_when_ancestor_unavailable'")
    log_info("Using cbs_url: {}".format(cbs_url))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_url_admin: {}".format(sg_url_admin))
    log_info("Using sg_db: {}".format(sg_db))
    log_info("Using bucket: {}".format(bucket))

    channels_list = ["ABC"]

    client = MobileRestClient()
    sg_util = SyncGateway()
    cb_server = couchbaseserver.CouchbaseServer(cbs_url)

    user1 = client.create_user(url=sg_url_admin, db=sg_db, name="user1", password="******", channels=channels_list)

    # rev-1 carries the attachment whose body will later become unavailable
    atts = attachment.load_from_data_dir(["sample_text.txt"])
    doc_with_att = document.create_doc(doc_id="att_doc", content={"sample_key": "sample_val"}, attachments=atts, channels=channels_list)

    doc_gen_1 = client.add_doc(url=sg_url, db=sg_db, doc=doc_with_att, auth=user1)
    client.update_doc(url=sg_url, db=sg_db, doc_id=doc_gen_1["id"], number_updates=10, auth=user1)

    # Clear cached rev doc bodys from server and cycle sync_gateway
    sg_util.stop_sync_gateways(cluster_config=cluster_config, url=sg_url)
    cb_server.delete_couchbase_server_cached_rev_bodies(bucket=bucket)
    sg_util.start_sync_gateways(cluster_config=cluster_config, url=sg_url, config=sg_conf)

    # Conflicting rev rooted at rev-1 must be accepted even though rev-1's
    # body is gone — validation falls back to the common ancestor
    client.add_conflict(
        url=sg_url,
        db=sg_db,
        doc_id=doc_gen_1["id"],
        parent_revisions=doc_gen_1["rev"],
        new_revision="2-foo",
        auth=user1
    )
def test_longpoll_awaken_channels(params_from_base_test_setup, sg_conf_name):
    """Verify that a waiting longpoll _changes feed is woken by three kinds of
    channel events: an admin-API channel grant, a channel removal via the sync
    function, and a channel grant via the sync function.
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    cluster_topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    sg_admin_url = cluster_topology["sync_gateways"][0]["admin"]
    sg_url = cluster_topology["sync_gateways"][0]["public"]

    log_info("sg_conf: {}".format(sg_conf))
    log_info("sg_admin_url: {}".format(sg_admin_url))
    log_info("sg_url: {}".format(sg_url))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # adam starts with both channels; traun and andy start with none
    adam_user_info = userinfo.UserInfo(name="adam", password="******", channels=["NBC", "ABC"], roles=[])
    traun_user_info = userinfo.UserInfo(name="traun", password="******", channels=[], roles=[])
    andy_user_info = userinfo.UserInfo(name="andy", password="******", channels=[], roles=[])
    sg_db = "db"
    doc_id = "adam_doc_0"

    client = MobileRestClient()

    adam_auth = client.create_user(url=sg_admin_url, db=sg_db,
                                   name=adam_user_info.name, password=adam_user_info.password, channels=adam_user_info.channels)

    traun_auth = client.create_user(url=sg_admin_url, db=sg_db,
                                    name=traun_user_info.name, password=traun_user_info.password, channels=traun_user_info.channels)

    andy_auth = client.create_user(url=sg_admin_url, db=sg_db,
                                   name=andy_user_info.name, password=andy_user_info.password, channels=andy_user_info.channels)

    ############################################################
    # changes feed wakes with Channel Access via Admin API
    ############################################################

    # Get starting sequence of docs, use the last seq to progress past any _user docs.
    adam_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=adam_auth)
    traun_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=traun_auth)
    andy_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=andy_auth)

    with concurrent.futures.ProcessPoolExecutor() as ex:
        # Start changes feed for 3 users
        adam_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=adam_changes["last_seq"], timeout=10, auth=adam_auth)
        traun_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=traun_changes["last_seq"], timeout=10, auth=traun_auth)
        andy_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=andy_changes["last_seq"], timeout=10, auth=andy_auth)

        # Wait for changes feed to notice there are no changes and enter wait. 2 seconds should be more than enough
        time.sleep(2)

        # Make sure the changes future is still running and has not exited due to any new changes, the feed should be caught up
        # and waiting
        assert not adam_changes_task.done()
        assert not traun_changes_task.done()
        assert not andy_changes_task.done()

        # Add a doc for adam with "NBC" and "ABC" channels
        # Add one doc, this should wake up the changes feed
        adam_add_docs_task = ex.submit(client.add_docs, url=sg_url, db=sg_db,
                                       number=1, id_prefix="adam_doc",
                                       auth=adam_auth, channels=adam_user_info.channels)

        # Wait for docs adds to complete
        adam_docs = adam_add_docs_task.result()
        assert len(adam_docs) == 1

        # Assert that the changes feed woke up and that the doc change was propagated
        adam_changes = adam_changes_task.result()
        assert len(adam_changes["results"]) == 1
        assert adam_changes["results"][0]["id"] == doc_id

        # Verify that the changes feed is still listening for Traun and Andy
        assert not traun_changes_task.done()
        assert not andy_changes_task.done()

        # Update the traun and andy to have one of adam's channels
        update_traun_user_task = ex.submit(client.update_user, url=sg_admin_url, db=sg_db,
                                           name=traun_user_info.name, password=traun_user_info.password, channels=["NBC"])
        traun_auth = update_traun_user_task.result()

        update_andy_user_task = ex.submit(client.update_user, url=sg_admin_url, db=sg_db,
                                          name=andy_user_info.name, password=andy_user_info.password, channels=["ABC"])
        andy_auth = update_andy_user_task.result()

        # Make sure changes feed wakes up and contains at least one change, 2 may be possible if the _user doc is included
        # Make sure the first change is 'adam_doc'
        traun_changes = traun_changes_task.result()
        assert 1 <= len(traun_changes["results"]) <= 2
        assert traun_changes["results"][0]["id"] == "adam_doc_0" or traun_changes["results"][0]["id"] == "_user/traun"

        andy_changes = andy_changes_task.result()
        assert 1 <= len(andy_changes["results"]) <= 2
        assert andy_changes["results"][0]["id"] == "adam_doc_0" or andy_changes["results"][0]["id"] == "_user/andy"

    # Block until user docs are seen
    client.verify_doc_id_in_changes(url=sg_url, db=sg_db, expected_doc_id="_user/adam", auth=adam_auth)
    client.verify_doc_id_in_changes(url=sg_url, db=sg_db, expected_doc_id="_user/traun", auth=traun_auth)
    client.verify_doc_id_in_changes(url=sg_url, db=sg_db, expected_doc_id="_user/andy", auth=andy_auth)

    # Make sure that adams doc shows up in changes due to the fact that the changes feed may be woken up with a _user doc above
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=adam_docs, auth=adam_auth)
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=adam_docs, auth=traun_auth)
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=adam_docs, auth=andy_auth)

    ############################################################
    # changes feed wakes with Channel Removal via Sync function
    ############################################################

    # Get latest last_seq for next test section
    adam_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=adam_auth)
    traun_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=traun_auth)
    andy_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=andy_auth)

    with concurrent.futures.ProcessPoolExecutor() as ex:
        # Start changes feed for 3 users from latest last_seq
        adam_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=adam_changes["last_seq"], timeout=10, auth=adam_auth)
        traun_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=traun_changes["last_seq"], timeout=10, auth=traun_auth)
        andy_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=andy_changes["last_seq"], timeout=10, auth=andy_auth)

        # Wait for changes feed to notice there are no changes and enter wait. 2 seconds should be more than enough
        time.sleep(2)

        # Make sure the changes future is still running and has not exited due to any new changes, the feed should be caught up
        # and waiting
        assert not adam_changes_task.done()
        assert not traun_changes_task.done()
        assert not andy_changes_task.done()

        # Remove the channels property from the doc
        client.update_doc(url=sg_url, db=sg_db, doc_id=doc_id, auth=traun_auth, channels=[])

        # All three changes feeds should wake up and return one result
        adam_changes = adam_changes_task.result()
        assert len(adam_changes["results"]) == 1
        assert adam_changes["results"][0]["removed"] == ["ABC", "NBC"]

        traun_changes = traun_changes_task.result()
        assert len(traun_changes["results"]) == 1
        assert traun_changes["results"][0]["removed"] == ["NBC"]

        andy_changes = andy_changes_task.result()
        assert len(andy_changes["results"]) == 1
        assert andy_changes["results"][0]["removed"] == ["ABC"]

    # Verify that users no longer can access the doc
    for user_auth in [adam_auth, traun_auth, andy_auth]:
        with pytest.raises(requests.exceptions.HTTPError) as excinfo:
            client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id, auth=user_auth)
        assert "403 Client Error: Forbidden for url:" in excinfo.value.message

    ############################################################
    # changes feed wakes with Channel Grant via Sync function
    ############################################################

    # Get latest last_seq for next test section
    adam_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=adam_auth)
    traun_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=traun_auth)
    andy_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=andy_auth)

    admin_auth = client.create_user(url=sg_admin_url, db=sg_db, name="admin", password="******", channels=["admin"])

    channel_grant_doc_id = "channel_grant_with_doc_intially"

    # Add a doc visible only in the "admin" channel, so none of the three
    # users can see it yet
    channel_grant_doc_body = document.create_doc(doc_id=channel_grant_doc_id, channels=["admin"])
    client.add_doc(url=sg_url, db=sg_db, doc=channel_grant_doc_body, auth=admin_auth)

    with concurrent.futures.ProcessPoolExecutor() as ex:

        # Start changes feed for 3 users from latest last_seq
        adam_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=adam_changes["last_seq"], timeout=10, auth=adam_auth)
        traun_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=traun_changes["last_seq"], timeout=10, auth=traun_auth)
        andy_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=andy_changes["last_seq"], timeout=10, auth=andy_auth)

        # Wait for changes feed to notice there are no changes and enter wait. 2 seconds should be more than enough
        time.sleep(2)

        # Make sure the changes future is still running and has not exited due to any new changes, the feed should be caught up
        # and waiting
        assert not adam_changes_task.done()
        assert not traun_changes_task.done()
        assert not andy_changes_task.done()

        # update the grant doc to have channel for all users
        update_task = ex.submit(client.update_doc, url=sg_url, db=sg_db, doc_id=channel_grant_doc_id, auth=admin_auth, channels=["admin", "ABC", "NBC"])
        updated_doc = update_task.result()
        assert updated_doc["rev"].startswith("2-")

        # Verify that access grant wakes up changes feed for adam, traun, and Andy
        adam_changes = adam_changes_task.result()
        assert len(adam_changes["results"]) == 1
        assert adam_changes["results"][0]["id"] == "channel_grant_with_doc_intially"
        assert adam_changes["results"][0]["changes"][0]["rev"].startswith("2-")

        traun_changes = traun_changes_task.result()
        assert len(traun_changes["results"]) == 1
        assert traun_changes["results"][0]["id"] == "channel_grant_with_doc_intially"
        assert traun_changes["results"][0]["changes"][0]["rev"].startswith("2-")

        andy_changes = andy_changes_task.result()
        assert len(andy_changes["results"]) == 1
        assert andy_changes["results"][0]["id"] == "channel_grant_with_doc_intially"
        assert andy_changes["results"][0]["changes"][0]["rev"].startswith("2-")
def test_attachment_revpos_when_ancestor_unavailable(params_from_base_test_setup, sg_conf_name):
    """Verify a conflicting revision can be pushed when its parent's body is gone.

    Scenario:
    1. Create a document with an attachment (rev-1).
    2. Update it 10 times so rev-1 is no longer the active revision.
    3. Stop Sync Gateway, purge the cached revision bodies from Couchbase
       Server, then restart Sync Gateway so rev-1's body can no longer be
       fetched (neither from the bucket backup nor the in-memory rev cache).
    4. Push a conflicting rev-2 whose parent is rev-1 and verify the push
       succeeds: instead of erroring because the _attachments property of
       rev-1 cannot be inspected, Sync Gateway should validate revpos values
       against the common ancestor shared with the active revision.
    """
    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    keywords = ClusterKeywords()
    keywords.reset_cluster(cluster_config, sg_conf)
    topology = keywords.get_cluster_topology(cluster_config)

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    bucket = "data-bucket"

    log_info("Running 'test_attachment_revpos_when_ancestor_unavailable'")
    log_info("Using cbs_url: {}".format(cbs_url))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_url_admin: {}".format(sg_url_admin))
    log_info("Using sg_db: {}".format(sg_db))
    log_info("Using bucket: {}".format(bucket))

    channels = ["ABC"]

    sg_client = MobileRestClient()
    gateway = SyncGateway()
    server = CouchbaseServer(cbs_url)

    user_auth = sg_client.create_user(url=sg_url_admin, db=sg_db, name="user1", password="******", channels=channels)

    attachment_doc_body = document.create_doc(
        doc_id="att_doc",
        content={"sample_key": "sample_val"},
        attachment_name="sample_text.txt",
        channels=channels
    )
    initial_doc = sg_client.add_doc(url=sg_url, db=sg_db, doc=attachment_doc_body, auth=user_auth)

    # Push the doc well past its first generation so rev-1 is no longer active
    sg_client.update_doc(url=sg_url, db=sg_db, doc_id=initial_doc["id"], number_updates=10, auth=user_auth)

    # Drop the server-side cached rev bodies and bounce Sync Gateway so its
    # in-memory rev cache is cleared too
    gateway.stop_sync_gateway(cluster_config=cluster_config, url=sg_url)
    server.delete_couchbase_server_cached_rev_bodies(bucket=bucket)
    gateway.start_sync_gateway(cluster_config=cluster_config, url=sg_url, config=sg_conf)

    # Adding a conflict whose parent body (rev-1) is unavailable should succeed
    sg_client.add_conflict(
        url=sg_url,
        db=sg_db,
        doc_id=initial_doc["id"],
        parent_revisions=initial_doc["rev"],
        new_revision="2-foo",
        auth=user_auth
    )
def test_rolling_ttl_remove_expirary(params_from_base_test_setup, sg_conf_name):
    """Verify that dropping the expiry on the final update cancels a rolling TTL.

    1. PUT /db/doc1 via SG with property "_exp":3
    2. Once per second for 10 seconds, update /db/doc1 with a new revision (also with "_exp":3)
    3. Update /db/doc1 with a revision with no expiry
    4. Get /db/doc1. Assert response is 200
    """
    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup['xattrs_enabled']

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    keywords = ClusterKeywords()
    topology = keywords.get_cluster_topology(cluster_config)
    keywords.reset_cluster(cluster_config=cluster_config, sync_gateway_config=sg_conf)

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]

    log_info("Running 'test_rolling_ttl_remove_expirary'")
    log_info("cbs_url: {}".format(cbs_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC", "ABC"]
    bucket_name = "data-bucket"

    cbs_ip = host_for_url(cbs_url)
    sdk_client = Bucket('couchbase://{}/{}'.format(cbs_ip, bucket_name), password='******')

    rest_client = MobileRestClient()
    rest_client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
    session = rest_client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)

    rolling_doc_body = document.create_doc(doc_id="exp_3", expiry=3, channels=sg_user_channels)
    fixed_doc_body = document.create_doc(doc_id="exp_10", expiry=10, channels=sg_user_channels)

    rolling_doc = rest_client.add_doc(url=sg_url, db=sg_db, doc=rolling_doc_body, auth=session)
    fixed_doc = rest_client.add_doc(url=sg_url, db=sg_db, doc=fixed_doc_body, auth=session)

    # Keep pushing the 3s expiry forward: 10 updates, one per second
    rest_client.update_doc(url=sg_url, db=sg_db, doc_id=rolling_doc["id"], number_updates=10, expiry=3, delay=1, auth=session)
    # Final revision carries no expiry, which should clear the TTL entirely
    rest_client.update_doc(url=sg_url, db=sg_db, doc_id=rolling_doc["id"], number_updates=1, auth=session)

    # If the expiry had not been removed by the last update, this sleep would
    # let doc "exp_3" expire
    time.sleep(5)

    # The doc with the removed expiry must still be retrievable
    rolling_doc = rest_client.get_doc(url=sg_url, db=sg_db, doc_id=rolling_doc["id"], auth=session)
    assert rolling_doc["_id"] == "exp_3"

    # "exp_10" should be gone by now: ~10s of updates plus the 5s sleep
    with pytest.raises(HTTPError) as he:
        rest_client.get_doc(url=sg_url, db=sg_db, doc_id=fixed_doc["id"], auth=session)

    # XATTR mode leaves a tombstone (403); Doc Meta mode purges the doc (404)
    if xattrs_enabled:
        assert he.value[0].startswith("403 Client Error: Forbidden for url:")
    else:
        assert he.value[0].startswith("404 Client Error: Not Found for url:")

    verify_doc_deletion_on_server(
        doc_id=fixed_doc["id"],
        sdk_client=sdk_client,
        sg_client=rest_client,
        sg_admin_url=sg_url_admin,
        sg_db=sg_db,
        xattrs_enabled=xattrs_enabled
    )
def test_auto_prune_listener_keeps_conflicts_sanity(setup_client_syncgateway_test):
    """Sanity check that listener revision pruning never discards a conflict.

    1. Create a db on LiteServ and a db on sync_gateway, each seeded with a
       doc sharing the same id so replication produces a conflict
    2. Run a one-shot pull replication from sync_gateway to LiteServ
    3. Update the LiteServ doc 100 times, past the revs limit
    4. Assert the conflicting revision is still retrievable
    5. Delete the current revision and check that a GET returns the old
       conflict as the current rev
    """
    cluster_config = setup_client_syncgateway_test["cluster_config"]
    sg_mode = setup_client_syncgateway_test["sg_mode"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_url = setup_client_syncgateway_test["sg_url"]
    sg_admin_url = setup_client_syncgateway_test["sg_admin_url"]

    rest_client = MobileRestClient()
    sg_config = sync_gateway_config_path_for_mode("listener_tests/listener_tests", sg_mode)
    test_cluster = cluster.Cluster(config=cluster_config)
    test_cluster.reset(sg_config_path=sg_config)

    log_info("Running 'test_auto_prune_listener_keeps_conflicts_sanity' ...")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_admin_url: {}".format(sg_admin_url))

    num_docs = 1
    num_revs = 100
    sg_db = "db"
    ls_db = "ls_db"
    sg_user_name = "sg_user"
    sg_user_channels = ["NBC"]

    rest_client.create_user(url=sg_admin_url, db=sg_db, name=sg_user_name, password="******", channels=sg_user_channels)
    sg_session = rest_client.create_session(url=sg_admin_url, db=sg_db, name=sg_user_name)
    ls_db = rest_client.create_database(url=ls_url, name=ls_db)

    # Same id prefix on both sides guarantees a conflict after replication
    ls_db_docs = rest_client.add_docs(url=ls_url, db=ls_db, number=num_docs, id_prefix="doc", channels=sg_user_channels)
    assert len(ls_db_docs) == num_docs

    sg_db_docs = rest_client.add_docs(url=sg_url, db=sg_db, number=num_docs, id_prefix="doc", channels=sg_user_channels, auth=sg_session)
    assert len(sg_db_docs) == num_docs

    # Block until Sync Gateway's changes feed has seen the docs
    rest_client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=sg_db_docs, auth=sg_session, polling_interval=1)

    # One-shot pull from Sync Gateway into LiteServ, then wait until it goes idle
    rest_client.start_replication(url=ls_url, continuous=False, from_url=sg_admin_url, from_db=sg_db, to_db=ls_db)
    rest_client.wait_for_no_replications(url=ls_url)

    # The replication should have produced a conflict on the client
    first_doc = ls_db_docs[0]
    conflicting_revs = rest_client.get_conflict_revs(url=ls_url, db=ls_db, doc=first_doc)

    # The conflicting revision must be readable before the update storm
    rest_client.get_doc(url=ls_url, db=ls_db, doc_id=first_doc["id"], rev=conflicting_revs[0])

    # Update well past the revs limit; pruning must not remove the conflict
    updated_doc = rest_client.update_doc(url=ls_url, db=ls_db, doc_id=first_doc["id"], number_updates=num_revs)
    rest_client.get_doc(url=ls_url, db=ls_db, doc_id=first_doc["id"], rev=conflicting_revs[0])

    # Deleting the winning revision should promote the conflict to current
    rest_client.delete_doc(url=ls_url, db=ls_db, doc_id=first_doc["id"], rev=updated_doc["rev"])
    current_doc = rest_client.get_doc(url=ls_url, db=ls_db, doc_id=first_doc["id"])
    assert current_doc["_rev"] == conflicting_revs[0]