def test_auto_prune_listener_keeps_conflicts_sanity(setup_client_syncgateway_test):
    """Verify that revision pruning on the listener does not drop conflicting revisions.

    1. Create db on LiteServ and add docs
    2. Create db on sync_gateway and add docs with the same id
    3. Create one shot push / pull replication
    4. Update LiteServ 50 times
    5. Assert that pruned conflict is still present
    6. Delete the current revision and check that a GET returns the old conflict as the current rev
    """
    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_url = setup_client_syncgateway_test["sg_url"]
    sg_admin_url = setup_client_syncgateway_test["sg_admin_url"]
    client = MobileRestClient()
    sg_helper = SyncGateway()
    # Boot Sync Gateway against the in-memory walrus bucket config
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config, url=sg_url, config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_auto_prune_listener_keeps_conflicts_sanity' ...")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_admin_url: {}".format(sg_admin_url))

    # num_revs (100) is chosen to push updates well past the listener's revs_limit
    num_docs = 1
    num_revs = 100
    sg_db = "db"
    ls_db = "ls_db"
    sg_user_name = "sg_user"
    sg_user_channels = ["NBC"]

    client.create_user(url=sg_admin_url, db=sg_db, name=sg_user_name, password="******", channels=sg_user_channels)
    sg_session = client.create_session(url=sg_admin_url, db=sg_db, name=sg_user_name)
    ls_db = client.create_database(url=ls_url, name=ls_db)

    # Create docs with same prefix to create conflicts when the dbs complete 1 shot replication
    ls_db_docs = client.add_docs(url=ls_url, db=ls_db, number=num_docs, id_prefix="doc", channels=sg_user_channels)
    assert len(ls_db_docs) == num_docs
    sg_db_docs = client.add_docs(
        url=sg_url, db=sg_db, number=num_docs, id_prefix="doc", channels=sg_user_channels, auth=sg_session
    )
    assert len(sg_db_docs) == num_docs

    # Setup one shot pull replication and wait for idle.
    client.start_replication(url=ls_url, continuous=False, from_url=sg_admin_url, from_db=sg_db, to_db=ls_db)
    client.wait_for_no_replications(url=ls_url)

    # There should now be a conflict on the client
    conflicting_revs = client.get_conflict_revs(url=ls_url, db=ls_db, doc=ls_db_docs[0])

    # Get the doc with conflict rev
    client.get_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"], rev=conflicting_revs[0])

    # Update doc past revs limit and make sure conflict is still available
    updated_doc = client.update_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"], number_updates=num_revs)
    client.get_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"], rev=conflicting_revs[0])

    # Delete doc and ensure that the conflict is now the current rev
    client.delete_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"], rev=updated_doc["rev"])
    current_doc = client.get_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"])
    assert current_doc["_rev"] == conflicting_revs[0]
def test_deleted_docs_from_changes_active_only(params_from_base_test_setup, sg_conf_name):
    """Regression test for deleted docs missing from a non-active-only changes request.

    https://github.com/couchbase/sync_gateway/issues/2955
    1. Create a document
    2. Delete the document
    3. Restart Sync Gateway (to force rebuild of cache from view)
    4. Issue an active_only=true changes request
    5. Issue an active_only=false changes request
    The deleted document was not being included in the result set in step 5.
    """
    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    num_docs = 10
    client = MobileRestClient()

    # Add doc to SG
    added_doc = client.add_docs(url=sg_admin_url, db=sg_db, number=num_docs, id_prefix="test_changes")

    # Delete 1 doc
    doc_id = added_doc[0]["id"]
    log_info("Deleting {}".format(doc_id))
    doc = client.get_doc(url=sg_admin_url, db=sg_db, doc_id=doc_id)
    doc_rev = doc['_rev']
    client.delete_doc(sg_admin_url, sg_db, doc_id, doc_rev)

    # Restart SG so the channel cache must be rebuilt from the view / backing store
    sg_obj = SyncGateway()
    sg_obj.restart_sync_gateways(cluster_config)

    # Changes request with active_only=true
    session = Session()
    request_url = "{}/{}/_changes?active_only=true".format(sg_admin_url, sg_db)
    log_info("Issuing changes request {}".format(request_url))
    resp = session.get(request_url)
    resp.raise_for_status()
    resp_obj = resp.json()

    log_info(
        "Checking that the deleted doc is not included in the active_only=true changes request"
    )
    for d in resp_obj["results"]:
        # BUG FIX: the original 'assert doc_id not in d' only tested the dict
        # *keys* of each changes row ('seq', 'id', 'changes', ...), so it could
        # never fail. Compare against the row's document id instead.
        assert d["id"] != doc_id

    # Changes request with active_only=false
    request_url = "{}/{}/_changes?active_only=false".format(sg_admin_url, sg_db)
    log_info("Issuing changes request {}".format(request_url))
    resp = session.get(request_url)
    resp.raise_for_status()
    resp_obj = resp.json()

    # The deleted doc must appear and be flagged as deleted (the issue under test)
    doc_found = False
    for d in resp_obj["results"]:
        if doc_id != d["id"]:
            continue
        else:
            assert doc_id == d["id"]
            assert d["deleted"]
            doc_found = True
            break

    log_info(
        "Checking that the deleted doc is included in the active_only=false changes request"
    )
    assert doc_found
def test_view_backfill_for_deletes(params_from_base_test_setup, sg_conf_name, validate_changes_before_restart):
    """Verify the view backfills both live docs and delete notifications after a restart.

    Scenario:
    1. Write a bunch of docs
    2. Delete 1/2
    3. Restart Sync Gateway
    4. Issue _changes, assert view backfills docs and delete notifications
    """
    num_docs = 1000
    sg_db = 'db'

    cluster_conf = params_from_base_test_setup['cluster_config']
    cluster_topology = params_from_base_test_setup['cluster_topology']
    mode = params_from_base_test_setup['mode']

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    sg_admin_url = cluster_topology['sync_gateways'][0]['admin']
    sg_url = cluster_topology['sync_gateways'][0]['public']
    cbs_url = cluster_topology['couchbase_servers'][0]

    log_info('sg_conf: {}'.format(sg_conf))
    log_info('sg_admin_url: {}'.format(sg_admin_url))
    log_info('sg_url: {}'.format(sg_url))
    log_info('cbs_url: {}'.format(cbs_url))
    log_info('validate_changes_before_restart: {}'.format(validate_changes_before_restart))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # Create clients
    sg_client = MobileRestClient()

    # Create user / session
    seth_user_info = UserInfo(name='seth', password='******', channels=['NASA', 'NATGEO'], roles=[])
    sg_client.create_user(url=sg_admin_url, db=sg_db, name=seth_user_info.name,
                          password=seth_user_info.password, channels=seth_user_info.channels)
    seth_auth = sg_client.create_session(url=sg_admin_url, db=sg_db, name=seth_user_info.name,
                                         password=seth_user_info.password)

    # Add 'num_docs' to Sync Gateway
    doc_bodies = document.create_docs('test_doc', number=num_docs, channels=seth_user_info.channels)
    bulk_resp = sg_client.add_bulk_docs(url=sg_url, db=sg_db, docs=doc_bodies, auth=seth_auth)
    assert len(bulk_resp) == num_docs

    # Delete half of the docs randomly.
    # BUG FIX: use floor division ('//'). Under Python 3, 'num_docs / 2' is a
    # float and range(float) raises TypeError; '//' gives the same value as the
    # original did under Python 2.
    deleted_docs = []
    for _ in range(num_docs // 2):
        random_doc = random.choice(bulk_resp)
        deleted_doc = sg_client.delete_doc(url=sg_url, db=sg_db, doc_id=random_doc['id'],
                                           rev=random_doc['rev'], auth=seth_auth)
        deleted_docs.append(deleted_doc)
        bulk_resp.remove(random_doc)

    log_info('Number of docs deleted: {}'.format(len(deleted_docs)))
    all_docs = bulk_resp + deleted_docs
    log_info('Number of docs to look for in changes: {}'.format(len(all_docs)))

    # This test will check changes before and after SG restart if
    # validate_changes_before_restart == True
    # If it is not set to True, only build the changes after restart
    if validate_changes_before_restart:
        # Verify deletions and inital docs show up in changes feed
        sg_client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=all_docs, auth=seth_auth)
        changes = sg_client.get_changes(url=sg_url, db=sg_db, since=0, auth=seth_auth)
        # All docs should show up + _user doc
        assert len(changes['results']) == num_docs + 1

        deleted_doc_ids = [doc['id'] for doc in deleted_docs]
        assert len(deleted_doc_ids) == num_docs // 2
        deleted_docs_in_changes = [
            change['id'] for change in changes['results']
            if 'deleted' in change and change['deleted']
        ]
        assert len(deleted_docs_in_changes) == num_docs // 2

        # All deleted docs should show up in the changes feed
        for doc_id in deleted_docs_in_changes:
            assert doc_id in deleted_doc_ids
            deleted_doc_ids.remove(doc_id)
        assert len(deleted_doc_ids) == 0

    # Restart Sync Gateway
    sg_controller = SyncGateway()
    sg_controller.stop_sync_gateways(url=sg_url, cluster_config=cluster_conf)
    sg_controller.start_sync_gateways(url=sg_url, cluster_config=cluster_conf, config=sg_conf)

    # Verify deletions and inital docs show up in changes feed
    sg_client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=all_docs, auth=seth_auth)
    changes = sg_client.get_changes(url=sg_url, db=sg_db, since=0, auth=seth_auth)
    # All docs should show up + _user doc
    assert len(changes['results']) == num_docs + 1

    deleted_doc_ids = [doc['id'] for doc in deleted_docs]
    assert len(deleted_doc_ids) == num_docs // 2
    deleted_docs_in_changes = [
        change['id'] for change in changes['results']
        if 'deleted' in change and change['deleted']
    ]
    assert len(deleted_docs_in_changes) == num_docs // 2

    # All deleted docs should show up in the changes feed
    for doc_id in deleted_docs_in_changes:
        assert doc_id in deleted_doc_ids
        deleted_doc_ids.remove(doc_id)
    assert len(deleted_doc_ids) == 0
def test_non_winning_revisions(params_from_base_test_setup, sg_conf_name):
    """
    Add non-winning revisions to the revision tree and ensure
    that the changes feed returns the correct revisions

    Steps:
    - Create a doc
    - Add 5 revs
    - changes, assert rev starts with "6-" from 0, store "last_seq_1"
    - Create a conflict off first revision ("2-foo") (POST docs, new_edits == false)
    - changes, assert rev starts with "6-" from "last_seq_1", store "last_seq_2"
    - changes, assert rev starts with "6-" from 0
    - Add a "3-foo" rev with rev "2-foo" as parent
    - changes, assert rev starts with "6-" from "last_seq_2", store "last_seq_3"
    - changes, assert rev starts with "6-" from 0
    - add tombstone rev as child of "6-" i.e. issue delete on ("6-")
    - changes, assert rev starts with "3-foo" from "last_seq_3"
    - changes, assert rev starts with "3-foo" from 0
    """
    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    c = cluster.Cluster(cluster_config)
    c.reset(sg_conf)

    client = MobileRestClient()
    seth_user_info = userinfo.UserInfo(
        name="seth", password="******", channels=["NATGEO"], roles=[]
    )
    seth_auth = client.create_user(
        url=sg_admin_url, db=sg_db, name=seth_user_info.name,
        password=seth_user_info.password, channels=seth_user_info.channels
    )

    # Seed one doc and bump it to revision generation 6 (1 add + 5 updates)
    test_doc_body = document.create_doc(doc_id="test_doc", channels=seth_user_info.channels)
    rev_gen_1_doc = client.add_doc(url=sg_url, db=sg_db, doc=test_doc_body, auth=seth_auth)
    rev_gen_6_doc = client.update_doc(url=sg_url, db=sg_db, doc_id=rev_gen_1_doc["id"],
                                      number_updates=5, auth=seth_auth)
    assert rev_gen_6_doc["rev"].startswith("6-")

    # Get changes until rev generation 6 document shows up
    start = time.time()
    last_seq = 0
    while True:
        if time.time() - start > keywords.constants.CLIENT_REQUEST_TIMEOUT:
            raise keywords.exceptions.TimeoutError("Wait for Replication Status Idle: TIMEOUT")

        changes_1 = client.get_changes(url=sg_url, db=sg_db, since=last_seq,
                                       auth=seth_auth, skip_user_docs=True)
        last_seq = changes_1["last_seq"]
        # break when expected rev shows up in changes feed
        if changes_1["results"] and changes_1["results"][0]["changes"][0]["rev"].startswith("6-"):
            break

    assert len(changes_1["results"]) == 1
    assert changes_1["results"][0]["id"] == "test_doc"
    assert changes_1["results"][0]["changes"][0]["rev"].startswith("6-")

    # Create a conflict off of rev one
    rev_gen_2_doc_conflict = client.add_conflict(
        url=sg_url, db=sg_db, doc_id=rev_gen_1_doc["id"],
        parent_revisions=rev_gen_1_doc["rev"], new_revision="2-foo", auth=seth_auth
    )
    assert rev_gen_2_doc_conflict["id"] == "test_doc"
    assert rev_gen_2_doc_conflict["rev"] == "2-foo"

    # Issue changes since changes_1 last_seq above; "6-" should still win
    changes_2 = client.get_changes(url=sg_url, db=sg_db, since=changes_1["last_seq"], auth=seth_auth)
    assert len(changes_2["results"]) == 1
    assert changes_2["results"][0]["id"] == "test_doc"
    assert changes_2["results"][0]["changes"][0]["rev"].startswith("6-")

    # Issue changes since 0, strip user doc and make sure the doc is still the '6-' rev
    changes_from_0_one = client.get_changes(url=sg_url, db=sg_db, since=0,
                                            auth=seth_auth, skip_user_docs=True)
    assert len(changes_from_0_one["results"]) == 1
    assert changes_from_0_one["results"][0]["id"] == "test_doc"
    assert changes_from_0_one["results"][0]["changes"][0]["rev"].startswith("6-")

    # Create a 3-foo rev with 2-foo as the parent
    rev_gen_3_doc_conflict = client.add_conflict(
        url=sg_url, db=sg_db, doc_id=rev_gen_2_doc_conflict["id"],
        parent_revisions=rev_gen_2_doc_conflict["rev"], new_revision="3-foo", auth=seth_auth
    )
    assert rev_gen_3_doc_conflict["id"] == "test_doc"
    assert rev_gen_3_doc_conflict["rev"] == "3-foo"

    # Issue changes since changes_2 last_seq above; winner is still "6-"
    changes_3 = client.get_changes(url=sg_url, db=sg_db, since=changes_2["last_seq"], auth=seth_auth)
    assert len(changes_3["results"]) == 1
    assert changes_3["results"][0]["id"] == "test_doc"
    assert changes_3["results"][0]["changes"][0]["rev"].startswith("6-")

    # Issue changes since 0, strip user doc and make sure the doc is still the '6-' rev
    changes_from_0_two = client.get_changes(url=sg_url, db=sg_db, since=0,
                                            auth=seth_auth, skip_user_docs=True)
    assert len(changes_from_0_two["results"]) == 1
    assert changes_from_0_two["results"][0]["id"] == "test_doc"
    assert changes_from_0_two["results"][0]["changes"][0]["rev"].startswith("6-")

    # Delete test_doc at rev 6-*; the "3-foo" conflict branch should become the winner
    client.delete_doc(url=sg_url, db=sg_db, doc_id=rev_gen_6_doc["id"],
                      rev=rev_gen_6_doc["rev"], auth=seth_auth)

    # Issue changes since changes_3 last_seq above
    changes_4 = client.get_changes(url=sg_url, db=sg_db, since=changes_3["last_seq"], auth=seth_auth)
    assert len(changes_4["results"]) == 1
    assert changes_4["results"][0]["id"] == "test_doc"
    assert changes_4["results"][0]["changes"][0]["rev"] == "3-foo"

    # Issue a oneshot changes since changes_4 last_seq and assert no results are returned
    changes_5 = client.get_changes(url=sg_url, db=sg_db, since=changes_4["last_seq"],
                                   feed="normal", auth=seth_auth)
    assert len(changes_5["results"]) == 0
# NOTE(review): this definition has the same name as an earlier
# test_non_winning_revisions; if both live in one module the later definition
# shadows the earlier one at import time — confirm these came from different files.
def test_non_winning_revisions(params_from_base_test_setup, sg_conf_name):
    """
    Add non-winning revisions to the revision tree and ensure
    that the changes feed returns the correct revisions

    Steps:
    - Add a doc
    - Add 5 revs
    - changes, assert rev starts with "6-" from 0, store "last_seq_1"
    - Create a conflict off first revision ("2-foo") (POST docs, new_edits == false)
    - changes, assert rev starts with "6-" from "last_seq_1", store "last_seq_2"
    - changes, assert rev starts with "6-" from 0
    - Add a "3-foo" rev with rev "2-foo" as parent
    - changes, assert rev starts with "6-" from "last_seq_2", store "last_seq_3"
    - changes, assert rev starts with "6-" from 0
    - add tombstone rev as child of "6-" i.e. issue delete on ("6-")
    - changes, assert rev starts with "3-foo" from "last_seq_3"
    - changes, assert rev starts with "3-foo" from 0
    """
    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]
    no_conflicts_enabled = params_from_base_test_setup["no_conflicts_enabled"]

    # This test deliberately creates conflicting revisions, which SG rejects
    # when running with --no-conflicts
    if no_conflicts_enabled:
        pytest.skip(
            '--no-conflicts is enabled, this test needs to create conflicts, so skipping the test'
        )

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    c = cluster.Cluster(cluster_config)
    c.reset(sg_conf)

    client = MobileRestClient()
    seth_user_info = userinfo.UserInfo(name="seth", password="******",
                                       channels=["NATGEO"], roles=[])
    seth_auth = client.create_user(url=sg_admin_url, db=sg_db,
                                   name=seth_user_info.name,
                                   password=seth_user_info.password,
                                   channels=seth_user_info.channels)

    # Seed one doc and bump it to revision generation 6 (1 add + 5 updates)
    test_doc_body = document.create_doc(doc_id="test_doc", channels=seth_user_info.channels)
    rev_gen_1_doc = client.add_doc(url=sg_url, db=sg_db, doc=test_doc_body, auth=seth_auth)
    rev_gen_6_doc = client.update_doc(url=sg_url, db=sg_db,
                                      doc_id=rev_gen_1_doc["id"],
                                      number_updates=5,
                                      auth=seth_auth)
    assert rev_gen_6_doc["rev"].startswith("6-")

    # Get changes until rev generation 6 document shows up
    start = time.time()
    last_seq = 0
    while True:
        if time.time() - start > keywords.constants.CLIENT_REQUEST_TIMEOUT:
            raise keywords.exceptions.TimeoutError(
                "Wait for Replication Status Idle: TIMEOUT")

        changes_1 = client.get_changes(url=sg_url, db=sg_db, since=last_seq,
                                       auth=seth_auth, skip_user_docs=True)
        last_seq = changes_1["last_seq"]
        # break when expected rev shows up in changes feed
        if changes_1["results"] and changes_1["results"][0]["changes"][0][
                "rev"].startswith("6-"):
            break

    assert len(changes_1["results"]) == 1
    assert changes_1["results"][0]["id"] == "test_doc"
    assert changes_1["results"][0]["changes"][0]["rev"].startswith("6-")

    # Create a conflict off of rev one
    rev_gen_2_doc_conflict = client.add_conflict(
        url=sg_url, db=sg_db, doc_id=rev_gen_1_doc["id"],
        parent_revisions=rev_gen_1_doc["rev"], new_revision="2-foo",
        auth=seth_auth)
    assert rev_gen_2_doc_conflict["id"] == "test_doc"
    assert rev_gen_2_doc_conflict["rev"] == "2-foo"

    # Issue changes since changes_1 last_seq above; "6-" should still win
    changes_2 = client.get_changes(url=sg_url, db=sg_db,
                                   since=changes_1["last_seq"], auth=seth_auth)
    assert len(changes_2["results"]) == 1
    assert changes_2["results"][0]["id"] == "test_doc"
    assert changes_2["results"][0]["changes"][0]["rev"].startswith("6-")

    # Issue changes since 0, strip user doc and make sure the doc is still the '6-' rev
    changes_from_0_one = client.get_changes(url=sg_url, db=sg_db, since=0,
                                            auth=seth_auth, skip_user_docs=True)
    assert len(changes_from_0_one["results"]) == 1
    assert changes_from_0_one["results"][0]["id"] == "test_doc"
    assert changes_from_0_one["results"][0]["changes"][0]["rev"].startswith(
        "6-")

    # Create a 3-foo rev with 2-foo as the parent
    rev_gen_3_doc_conflict = client.add_conflict(
        url=sg_url, db=sg_db, doc_id=rev_gen_2_doc_conflict["id"],
        parent_revisions=rev_gen_2_doc_conflict["rev"], new_revision="3-foo",
        auth=seth_auth)
    assert rev_gen_3_doc_conflict["id"] == "test_doc"
    assert rev_gen_3_doc_conflict["rev"] == "3-foo"

    # Issue changes since changes_2 last_seq above; winner is still "6-"
    changes_3 = client.get_changes(url=sg_url, db=sg_db,
                                   since=changes_2["last_seq"], auth=seth_auth)
    assert len(changes_3["results"]) == 1
    assert changes_3["results"][0]["id"] == "test_doc"
    assert changes_3["results"][0]["changes"][0]["rev"].startswith("6-")

    # Issue changes since 0, strip user doc and make sure the doc is still the '6-' rev
    changes_from_0_two = client.get_changes(url=sg_url, db=sg_db, since=0,
                                            auth=seth_auth, skip_user_docs=True)
    assert len(changes_from_0_two["results"]) == 1
    assert changes_from_0_two["results"][0]["id"] == "test_doc"
    assert changes_from_0_two["results"][0]["changes"][0]["rev"].startswith(
        "6-")

    # Delete test_doc at rev 6-*; the "3-foo" conflict branch should become the winner
    client.delete_doc(url=sg_url, db=sg_db, doc_id=rev_gen_6_doc["id"],
                      rev=rev_gen_6_doc["rev"], auth=seth_auth)

    # Issue changes since changes_3 last_seq above
    changes_4 = client.get_changes(url=sg_url, db=sg_db,
                                   since=changes_3["last_seq"], auth=seth_auth)
    assert len(changes_4["results"]) == 1
    assert changes_4["results"][0]["id"] == "test_doc"
    assert changes_4["results"][0]["changes"][0]["rev"] == "3-foo"

    # Issue a oneshot changes since changes_4 last_seq and assert no results are returned
    changes_5 = client.get_changes(url=sg_url, db=sg_db,
                                   since=changes_4["last_seq"], feed="normal",
                                   auth=seth_auth)
    assert len(changes_5["results"]) == 0
def test_mobile_opt_in(params_from_base_test_setup, sg_conf_name):
    """
    Scenario: Enable mobile opt in sync function in sync-gateway configuration file
    - Check xattrs/mobile-opt-in_cc or di json files
    - 8 cases covered
    - doc : https://docs.google.com/document/d/1XxLIBsjuj_UxTTJs4Iu7C7uZdos8ZEzeckrVc17y3sw/edit
    - #1 Create doc via sdk with mobile opt in and verify doc is imported
    - #2 Create doc via sdk with mobile opt out and verify doc is not imported
    - #3 Create doc via sg with mobile opt in and update via sdk and verify doc is imported
    - #4 Create doc via sg with mobile opt out and update via sdk and verify doc is not imported
         - Try to update same doc via sg and verify 409 conflict error is thrown
         - Create a doc with same doc id and verify doc is created successfully
    - #5 Create doc via sg with mobile opt out and update via sdk which created no revisions
         - Now do sdk create with mobile opt in should import case #5
    - #6 Create doc via sg with mobile opt out and update via sdk with opt in
         - Verify type is overrided and doc is imported
    - #7 Create doc via sg with mobile opt in and update via sdk with opt out
         - Verify type is overrided and doc is not imported
    - #8 Disable import in the sg config and have mobile opt in function
         Create doc via sdk with mobile property and verify sg update succeeds
    - #9 Same config as #8 and have mobile opt in function in config
         Create doc via sdk without mobile property and create new doc via sg with same doc id and verify it succeeds
    """
    bucket_name = 'data-bucket'
    sg_db = 'db'

    cluster_conf = params_from_base_test_setup['cluster_config']
    cluster_topology = params_from_base_test_setup['cluster_topology']
    mode = params_from_base_test_setup['mode']
    xattrs_enabled = params_from_base_test_setup['xattrs_enabled']

    # This test should only run when using xattr meta storage
    if not xattrs_enabled:
        pytest.skip('XATTR tests require --xattrs flag')

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    sg_admin_url = cluster_topology['sync_gateways'][0]['admin']
    sg_url = cluster_topology['sync_gateways'][0]['public']
    cbs_url = cluster_topology['couchbase_servers'][0]

    log_info('sg_conf: {}'.format(sg_conf))
    log_info('sg_admin_url: {}'.format(sg_admin_url))
    log_info('sg_url: {}'.format(sg_url))
    log_info('cbs_url: {}'.format(cbs_url))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # Create clients: one REST client for SG, one direct Couchbase SDK bucket handle
    sg_client = MobileRestClient()
    cbs_ip = host_for_url(cbs_url)
    sdk_client = Bucket('couchbase://{}/{}'.format(cbs_ip, bucket_name),
                        password='******', timeout=SDK_TIMEOUT)

    # Create user / session
    auto_user_info = UserInfo(name='autotest', password='******',
                              channels=['mobileOptIn'], roles=[])
    sg_client.create_user(
        url=sg_admin_url, db=sg_db, name=auto_user_info.name,
        password=auto_user_info.password, channels=auto_user_info.channels
    )
    test_auth_session = sg_client.create_session(
        url=sg_admin_url, db=sg_db, name=auto_user_info.name,
        password=auto_user_info.password
    )

    # Property generator producing a doc body that opts IN to mobile import
    def update_mobile_prop():
        return {
            'updates': 0,
            'type': 'mobile',
        }

    # Property generator producing a doc body that opts OUT of mobile import
    def update_non_mobile_prop():
        return {
            'updates': 0,
            'test': 'true',
            'type': 'mobile opt out',
        }

    # Create first doc via SDK with type mobile. Case #1
    doc_id1 = 'mobile_opt_in_sdk_doc'
    doc = document.create_doc(doc_id=doc_id1, channels=['mobileOptIn'], prop_generator=update_mobile_prop)
    sdk_client.upsert(doc_id1, doc)
    sg_get_doc1 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id1, auth=test_auth_session)
    assert sg_get_doc1['_rev'].startswith('1-') and sg_get_doc1['_id'] == doc_id1
    # Additional coverage for case #1
    sg_client.update_doc(url=sg_url, db=sg_db, doc_id=doc_id1, number_updates=1, auth=test_auth_session)
    sg_get_doc1 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id1, auth=test_auth_session)
    assert sg_get_doc1['_rev'].startswith('2-') and sg_get_doc1['_id'] == doc_id1

    # Create second doc via SDK with type non mobile. Case #2
    doc_id2 = 'mobile_opt_out_sdk_doc'
    doc = document.create_doc(doc_id=doc_id2, channels=['mobileOptIn'], prop_generator=update_non_mobile_prop)
    sdk_client.upsert(doc_id2, doc)
    with pytest.raises(HTTPError) as he:
        sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id2, auth=test_auth_session)
    log_info(he.value)
    # NOTE(review): '.message' on an exception is Python-2-only; under Python 3
    # this would need str(he.value) — confirm the target runtime for this suite.
    assert he.value.message.startswith('404 Client Error: Not Found for url:')

    # Create third sg doc with mobile opt in and update via sdk. Case #3
    doc_id3 = 'mobile_opt_in_sg_doc'
    doc_body = document.create_doc(doc_id=doc_id3, channels=['mobileOptIn'], prop_generator=update_mobile_prop)
    doc = sg_client.add_doc(url=sg_url, db=sg_db, doc=doc_body, auth=test_auth_session)
    sg_get_doc3 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id3, auth=test_auth_session)
    sg_get_doc3["updated_sdk_via_sg"] = "1"
    sdk_client.upsert(doc_id3, sg_get_doc3)
    sg_get_doc3 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id3, auth=test_auth_session)
    assert sg_get_doc3['_rev'].startswith('2-') and sg_get_doc3['_id'] == doc_id3
    log_info("sg get doc3 is {}".format(sg_get_doc3))

    # Create fourth sg doc with mobile opt out and update via sdk. Case #4 and case #8
    doc_id4 = 'mobile_opt_out_sg_doc'
    doc_body = document.create_doc(doc_id=doc_id4, channels=['mobileOptIn'], prop_generator=update_non_mobile_prop)
    doc = sg_client.add_doc(url=sg_url, db=sg_db, doc=doc_body, auth=test_auth_session)
    # update vis SDK
    sg_get_doc4 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id4, auth=test_auth_session)
    rev = sg_get_doc4['_rev']
    sg_get_doc4["updated_sdk_via_sg"] = "1"
    sdk_client.upsert(doc_id4, sg_get_doc4)
    with pytest.raises(HTTPError) as he:
        sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id4, auth=test_auth_session)
    log_info(he.value)
    assert he.value.message.startswith('404 Client Error: Not Found for url:')
    # update via SG
    with pytest.raises(HTTPError) as he:
        sg_client.put_doc(url=sg_url, db=sg_db, doc_id=doc_id4,
                          doc_body={'sg_rewrite': 'True'}, rev=rev, auth=test_auth_session)
    log_info(he.value)
    assert he.value.message.startswith('409 Client Error: Conflict for url:')
    # Create same doc again to verify there is not existing key error covers case #8
    doc_body = document.create_doc(doc_id=doc_id4, channels=['mobileOptIn'], prop_generator=update_non_mobile_prop)
    sg_get_doc4_1 = sg_client.add_doc(url=sg_url, db=sg_db, doc=doc_body, auth=test_auth_session)
    log_info("4th doc after recreate vis sg is {}".format(sg_get_doc4_1))
    assert sg_get_doc4_1['rev'].startswith('1-') and sg_get_doc4_1['id'] == doc_id4

    # Create Fifth sg doc with mobile opt in and delete doc which created no revisions i.e tombstone doc
    # Now do sdk create with mobile opt in should import case #5
    doc_id5 = 'mobile_sdk_recreate_no_activerev'
    doc_body = document.create_doc(doc_id=doc_id5, channels=['mobileOptIn'], prop_generator=update_mobile_prop)
    doc = sg_client.add_doc(url=sg_url, db=sg_db, doc=doc_body, auth=test_auth_session)
    rev = doc['rev']
    sg_client.delete_doc(url=sg_url, db=sg_db, doc_id=doc_id5, rev=rev, auth=test_auth_session)
    # At this point no active revisions for this doc, so now update via sdk with mobile opt in should be successful
    # in getting doc
    doc = document.create_doc(doc_id=doc_id5, channels=['mobileOptIn'], prop_generator=update_mobile_prop)
    sdk_client.upsert(doc_id5, doc)
    sg_get_doc5 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id5, auth=test_auth_session)
    log_info("sg get doc 5 is {}".format(sg_get_doc5))
    assert sg_get_doc5['_rev'].startswith('1-') and sg_get_doc5['_id'] == doc_id5

    # Create sixth sg doc with mobile opt out and update via sdk with opt in
    doc_id6 = 'mobileoptout_sg_doc_sdkupdate_optin'
    doc_body = document.create_doc(doc_id=doc_id6, channels=['mobileOptIn'], prop_generator=update_non_mobile_prop)
    doc = sg_client.add_doc(url=sg_url, db=sg_db, doc=doc_body, auth=test_auth_session)
    sg_get_doc6 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id6, auth=test_auth_session)
    log_info("Sg sixth doc is {}".format(sg_get_doc6))
    sg_get_doc6["type"] = "mobile"
    sdk_client.upsert(doc_id6, sg_get_doc6)
    sg_get_doc6 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id6, auth=test_auth_session)
    assert sg_get_doc6['_rev'].startswith('2-') and sg_get_doc6['_id'] == doc_id6

    # Create seventh sg doc with mobile opt in and update via sdk with opt out
    doc_id7 = 'mobileoptin_sg_doc_sdkupdate_optout'
    doc_body = document.create_doc(doc_id=doc_id7, channels=['mobileOptIn'], prop_generator=update_mobile_prop)
    doc = sg_client.add_doc(url=sg_url, db=sg_db, doc=doc_body, auth=test_auth_session)
    sg_get_doc7 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id7, auth=test_auth_session)
    log_info("Sg sixth doc is {}".format(sg_get_doc7))
    sg_get_doc7["type"] = "mobile opt out"
    sdk_client.upsert(doc_id7, sg_get_doc7)
    with pytest.raises(HTTPError) as he:
        sg_get_doc7 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id7, auth=test_auth_session)
    log_info(he.value)
    assert he.value.message.startswith('404 Client Error: Not Found for url:')
    # TODO : verify _changes that it shows tombstone revisions -> it will happen on 2.0

    # Create eighth sdk doc with import disabled and add mobile property and update via sg. Case #7
    # Restart SG with a config that disables automatic import
    sg_conf_name = "xattrs/mobile_opt_in_no_import"
    sg_no_import_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    sg_util = SyncGateway()
    sg_util.start_sync_gateways(cluster_config=cluster_conf, url=sg_url, config=sg_no_import_conf)

    doc_id8 = 'mobile_opt_in_sg_rewrite_with_importdisabled'
    doc_body = document.create_doc(doc_id=doc_id8, channels=['mobileOptIn'], prop_generator=update_mobile_prop)
    sdk_client.upsert(doc_id8, doc_body)
    with pytest.raises(HTTPError) as he:
        sg_client.add_doc(url=sg_url, db=sg_db, doc=doc_body, auth=test_auth_session)
    log_info(he.value)
    assert he.value.message.startswith('409 Client Error: Conflict for url:')
    sg_client.update_doc(url=sg_url, db=sg_db, doc_id=doc_id8, number_updates=1, auth=test_auth_session)
    sg_get_doc8 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id8, auth=test_auth_session)
    assert sg_get_doc8['_rev'].startswith('2-') and sg_get_doc8['_id'] == doc_id8

    # Create ninth sdk doc with import disabled and add mobile property and update via sg. Case #8
    doc_id9 = 'mobile_opt_out_sg_rewrite_with_importdisabled'
    doc_body = document.create_doc(doc_id=doc_id9, channels=['mobileOptIn'], prop_generator=update_non_mobile_prop)
    sdk_client.upsert(doc_id9, doc_body)
    sg_client.add_doc(url=sg_url, db=sg_db, doc=doc_body, auth=test_auth_session)
    # sg_client.update_doc(url=sg_url, db=sg_db, doc_id=doc_id8, number_updates=1, auth=test_auth_session)
    sg_get_doc9 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id9, auth=test_auth_session)
    assert sg_get_doc9['_rev'].startswith('1-') and sg_get_doc9['_id'] == doc_id9
def test_auto_prune_listener_keeps_conflicts_sanity(setup_client_syncgateway_test):
    """Verify that a conflict revision pruned past the revs limit is still retrievable on LiteServ.

    1. Create db on LiteServ and add docs
    2. Create db on sync_gateway and add docs with the same id
    3. Create one shot push / pull replication
    4. Update LiteServ 50 times
    5. Assert that pruned conflict is still present
    6. Delete the current revision and check that a GET returns the old conflict as the current rev
    """
    # Fixture-provided cluster/server endpoints (setup_client_syncgateway_test is a pytest fixture).
    cluster_config = setup_client_syncgateway_test["cluster_config"]
    sg_mode = setup_client_syncgateway_test["sg_mode"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_url = setup_client_syncgateway_test["sg_url"]
    sg_admin_url = setup_client_syncgateway_test["sg_admin_url"]

    client = MobileRestClient()

    # Reset the cluster with the listener-test Sync Gateway config before running.
    sg_config = sync_gateway_config_path_for_mode("listener_tests/listener_tests", sg_mode)
    c = cluster.Cluster(config=cluster_config)
    c.reset(sg_config_path=sg_config)

    log_info("Running 'test_auto_prune_listener_keeps_conflicts_sanity' ...")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_admin_url: {}".format(sg_admin_url))

    num_docs = 1
    # Number of updates applied to push the original revision past the default revs limit.
    num_revs = 100

    sg_db = "db"
    ls_db = "ls_db"
    sg_user_name = "sg_user"
    sg_user_channels = ["NBC"]

    # User/session on Sync Gateway (admin port), then the LiteServ database.
    client.create_user(url=sg_admin_url, db=sg_db, name=sg_user_name, password="******", channels=sg_user_channels)
    sg_session = client.create_session(url=sg_admin_url, db=sg_db, name=sg_user_name)

    ls_db = client.create_database(url=ls_url, name=ls_db)

    # Create docs with same prefix to create conflicts when the dbs complete 1 shot replication
    ls_db_docs = client.add_docs(url=ls_url, db=ls_db, number=num_docs, id_prefix="doc", channels=sg_user_channels)
    assert len(ls_db_docs) == num_docs

    sg_db_docs = client.add_docs(
        url=sg_url, db=sg_db, number=num_docs, id_prefix="doc", channels=sg_user_channels, auth=sg_session
    )
    assert len(sg_db_docs) == num_docs

    # Wait for changes to be available on Sync Gateway
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=sg_db_docs, auth=sg_session, polling_interval=1)

    # Setup one shot pull replication and wait for idle.
    client.start_replication(url=ls_url, continuous=False, from_url=sg_admin_url, from_db=sg_db, to_db=ls_db)
    client.wait_for_no_replications(url=ls_url)

    # There should now be a conflict on the client
    conflicting_revs = client.get_conflict_revs(url=ls_url, db=ls_db, doc=ls_db_docs[0])

    # Get the doc with conflict rev
    client.get_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"], rev=conflicting_revs[0])

    # Update doc past revs limit and make sure conflict is still available
    updated_doc = client.update_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"], number_updates=num_revs)
    client.get_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"], rev=conflicting_revs[0])

    # Delete doc and ensure that the conflict is now the current rev
    client.delete_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"], rev=updated_doc["rev"])
    current_doc = client.get_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"])
    assert current_doc["_rev"] == conflicting_revs[0]
def test_document_resurrection(params_from_base_test_setup, sg_conf_name, deletion_type):
    """Delete (tombstone or purge) docs, recreate them, and verify the resulting rev generations.

    ``deletion_type`` is either ``'tombstone'`` or ``'purge'`` (any other value raises ValueError).

    Scenarios:

    Doc meta mode / tombstone
    - Create docs (set A) via Sync Gateway
    - Delete docs (set A) via Sync Gateway
    - Verify docs (set A) are deleted via Sync Gateway
    - Create docs (set A) via Sync Gateway
    - Verify revs (set A) are generation 3 via Sync Gateway

    Doc meta mode / purge
    - Create docs (set A) via Sync Gateway
    - Purge docs (set A) via Sync Gateway
    - Verify docs (set A) are deleted via Sync Gateway
    - Create docs (set A) via Sync Gateway
    - Verify revs (set A) are generation 1 via Sync Gateway

    XATTRs / tombstone
    - Create docs (set A) via Sync Gateway
    - Create docs (set B) via SDK
    - Delete SDK docs (set B) via Sync Gateway
    - Delete SG docs (set A) via SDK
    - Verify docs (set B) are deleted via Sync Gateway
    - Verify docs (set B) are deleted via SDK
    - Verify docs (set A) are deleted via Sync Gateway
    - Verify docs (set A) are deleted via SDK
    - Create docs (set A) via Sync Gateway
    - Create docs (set B) via SDK
    - Verify revs (set A, B) are generation 3 via Sync Gateway

    XATTRs / purge
    - Create docs (set A) via Sync Gateway
    - Create docs (set B) via SDK
    - Purge SDK docs (set B) via Sync Gateway
    - Delete SG docs (set A) via SDK
    - Verify docs (set B) are deleted via Sync Gateway
    - Verify docs (set B) are deleted via SDK
    - Verify docs (set A) are deleted via Sync Gateway
    - Verify docs (set A) are deleted via SDK
    - Create docs (set A) via Sync Gateway
    - Create docs (set B) via SDK
    - Verify revs (set A, B) are generation 1 via Sync Gateway
    """
    # Fixture-provided cluster settings; xattrs_enabled toggles the SDK half of the test.
    cluster_conf = params_from_base_test_setup['cluster_config']
    cluster_topology = params_from_base_test_setup['cluster_topology']
    mode = params_from_base_test_setup['mode']
    xattrs_enabled = params_from_base_test_setup['xattrs_enabled']

    cbs_url = cluster_topology['couchbase_servers'][0]
    sg_admin_url = cluster_topology['sync_gateways'][0]['admin']
    sg_url = cluster_topology['sync_gateways'][0]['public']

    bucket_name = 'data-bucket'
    sg_db = 'db'
    cbs_host = host_for_url(cbs_url)

    num_docs_per_client = 10

    # Reset cluster
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # Initialize clients
    sg_client = MobileRestClient()
    sdk_client = Bucket('couchbase://{}/{}'.format(cbs_host, bucket_name), password='******')

    # Create Sync Gateway user
    sg_user_channels = ['NASA', 'NATGEO']
    sg_client.create_user(url=sg_admin_url, db=sg_db, name='seth', password='******', channels=sg_user_channels)
    sg_user_auth = sg_client.create_session(url=sg_admin_url, db=sg_db, name='seth', password='******')

    # Create / Add docs from SG (set A), each with two small PNG attachments.
    sg_doc_bodies = document.create_docs(
        doc_id_prefix='sg_doc',
        number=num_docs_per_client,
        content={'foo': 'bar'},
        channels=sg_user_channels,
        attachments_generator=attachment.generate_2_png_10_10
    )
    sg_doc_ids = [doc['_id'] for doc in sg_doc_bodies]

    sg_bulk_docs_resp = sg_client.add_bulk_docs(url=sg_url, db=sg_db, docs=sg_doc_bodies, auth=sg_user_auth)
    assert len(sg_bulk_docs_resp) == num_docs_per_client

    all_doc_ids = sg_doc_ids
    assert len(all_doc_ids) == num_docs_per_client

    if xattrs_enabled:
        # Create / Add docs from sdk (set B) — only meaningful when SG imports via xattrs.
        log_info('Adding docs via SDK')
        sdk_doc_bodies = document.create_docs(
            doc_id_prefix='sdk_doc',
            number=num_docs_per_client,
            content={'foo': 'bar'},
            channels=sg_user_channels,
        )
        sdk_docs = {doc['_id']: doc for doc in sdk_doc_bodies}
        sdk_doc_ids = [doc['_id'] for doc in sdk_doc_bodies]

        log_info('Creating SDK docs')
        sdk_client.upsert_multi(sdk_docs)

        all_doc_ids = sg_doc_ids + sdk_doc_ids
        assert len(all_doc_ids) == num_docs_per_client * 2

    if deletion_type == 'tombstone':
        # Set the target docs.
        # Doc meta mode: Delete Sync Gateway docs via Sync Gateway
        # XATTR mode: Delete SDK docs via Sync Gateway
        sg_doc_ids_to_delete = sg_doc_ids
        if xattrs_enabled:
            sg_doc_ids_to_delete = sdk_doc_ids

        # SG delete target docs (fetch current rev first — delete requires it).
        for doc_id in sg_doc_ids_to_delete:
            doc = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id, auth=sg_user_auth)
            deleted = sg_client.delete_doc(url=sg_url, db=sg_db, doc_id=doc_id, rev=doc['_rev'], auth=sg_user_auth)
            log_info(deleted)

        if xattrs_enabled:
            log_info('Deleting SG docs via SDK')
            sdk_client.remove_multi(sg_doc_ids)

    elif deletion_type == 'purge':
        # SG Purge all docs (purge needs the full doc bodies, so fetch them first).
        all_docs, errors = sg_client.get_bulk_docs(url=sg_url, db=sg_db, doc_ids=all_doc_ids, auth=sg_user_auth)
        if xattrs_enabled:
            assert len(all_docs) == num_docs_per_client * 2
            assert len(errors) == 0
        else:
            assert len(all_docs) == num_docs_per_client
            assert len(errors) == 0
        log_info('Purging docs via Sync Gateway')
        # Purge is an admin-only operation, hence sg_admin_url here.
        sg_client.purge_docs(url=sg_admin_url, db=sg_db, docs=all_docs)

    else:
        raise ValueError('Invalid test parameters')

    # Verify deletes via Sync Gateway
    deleted_docs_to_verify = sg_doc_ids
    assert len(deleted_docs_to_verify) == num_docs_per_client

    # If running is xattr mode, make sure to verify SG + SDK docs
    if xattrs_enabled:
        deleted_docs_to_verify = sg_doc_ids + sdk_doc_ids
        assert len(deleted_docs_to_verify) == num_docs_per_client * 2

    # verify_sg_deletes / verify_sg_purges / verify_sdk_deletes are helpers defined elsewhere in this file.
    if xattrs_enabled and deletion_type == 'tombstone':
        # Verify SDK + SG docs are deleted from Sync Gateway
        verify_sg_deletes(sg_client, sg_url, sg_db, deleted_docs_to_verify, sg_user_auth)
        # Verify SDK + SG docs are deleted from SDK
        verify_sdk_deletes(sdk_client, deleted_docs_to_verify)
    elif xattrs_enabled and deletion_type == 'purge':
        # Verify SDK + SG docs are purged from Sync Gateway
        verify_sg_purges(sg_client, sg_url, sg_db, deleted_docs_to_verify, sg_user_auth)
        # Verify SDK + SG docs are deleted from SDK
        verify_sdk_deletes(sdk_client, deleted_docs_to_verify)
    elif not xattrs_enabled and deletion_type == 'tombstone':
        # Doc meta: Verify SG docs are all deleted via SG
        verify_sg_deletes(sg_client, sg_url, sg_db, deleted_docs_to_verify, sg_user_auth)
    elif not xattrs_enabled and deletion_type == 'purge':
        # Doc meta: Verify SG docs are all deleted via SG
        verify_sg_purges(sg_client, sg_url, sg_db, deleted_docs_to_verify, sg_user_auth)
    else:
        raise ValueError('Invalid test parameters')

    # Recreate deleted docs from Sync Gateway
    sg_bulk_docs_resp = sg_client.add_bulk_docs(url=sg_url, db=sg_db, docs=sg_doc_bodies, auth=sg_user_auth)
    assert len(sg_bulk_docs_resp) == num_docs_per_client

    if xattrs_enabled:
        log_info('Recreating SDK docs')
        # Recreate deleted docs from SDK
        sdk_client.upsert_multi(sdk_docs)

    # Get docs via Sync Gateway
    doc_ids_to_get = sg_doc_ids
    if xattrs_enabled:
        doc_ids_to_get = sg_doc_ids + sdk_doc_ids
    # validate=False: we only want the bodies back; rev validation is done manually below.
    docs, errors = sg_client.get_bulk_docs(
        url=sg_url,
        db=sg_db,
        doc_ids=doc_ids_to_get,
        auth=sg_user_auth,
        validate=False
    )
    if xattrs_enabled:
        assert len(docs) == num_docs_per_client * 2
        assert len(errors) == 0
    else:
        assert len(docs) == num_docs_per_client
        assert len(errors) == 0

    if xattrs_enabled:
        # Get SDK docs and makes sure all docs were recreated
        all_docs_from_sdk = sdk_client.get_multi(doc_ids_to_get)
        assert len(all_docs_from_sdk) == num_docs_per_client * 2
        log_info('Found: {} recreated docs via SDK'.format(len(all_docs_from_sdk)))

        # Make sure we are able to get recreated docs via SDK
        doc_ids_to_get_scratch = list(doc_ids_to_get)
        assert len(doc_ids_to_get_scratch) == num_docs_per_client * 2
        for doc_id in all_docs_from_sdk:
            doc_ids_to_get_scratch.remove(doc_id)
        assert len(doc_ids_to_get_scratch) == 0

    # Make sure we are able to get recreated docs via Sync Gateway
    doc_ids_to_get_scratch = list(doc_ids_to_get)
    if xattrs_enabled:
        # SG + SDK docs
        assert len(doc_ids_to_get_scratch) == num_docs_per_client * 2
    else:
        # SG docs
        assert len(doc_ids_to_get_scratch) == num_docs_per_client

    for doc in docs:
        # Verify expected document revisions
        if xattrs_enabled:
            if deletion_type == 'purge':
                # SG purges SG docs and recreates them, expecting 1- rev
                # SDK removes SDK docs and recreates them, expecting 1- rev
                assert doc['_rev'].startswith('1-')
            else:
                # SG tombstones SG docs and recreates them, expecting 3- rev
                # SDK removes SDK docs and recreates them, expecting 1- rev
                if doc['_id'].startswith('sg_'):
                    assert doc['_rev'].startswith('3-')
                else:
                    assert doc['_rev'].startswith('1-')
        else:
            if deletion_type == 'purge':
                # SG purges SG docs and recreates them, expecting 1- rev
                assert doc['_rev'].startswith('1-')
            else:
                # SG tombstones SG docs and recreates them, expecting 3- rev
                assert doc['_rev'].startswith('3-')

        doc_ids_to_get_scratch.remove(doc['_id'])

    # Make sure all docs were found
    assert len(doc_ids_to_get_scratch) == 0
def test_migrate_conflicts_delete_last_rev(params_from_base_test_setup, sg_conf_name, num_of_docs):
    """Migrate conflicts to no-conflicts mode, delete the last revision, and verify that
    every revision in the doc's history is among the open revisions captured beforehand.

    Test case link : https://docs.google.com/spreadsheets/d/1YwI_gCeoBebQKBybkzoAEoXSc0XLReszDA-mPFQapgk/edit#gid=0
    covered #19

    Steps:
    1. Start sg with default(i.e allow_conflicts=true)
    2. Add docs to SG.
    3. Update docs few times .
    4. Create a conflicts and verify it is successful.
    5. Modify sg config by enabling allow_conflicts to false
    6. restart sg.
    7. Delete doc by revision of current active open revision
    8. Verify all revisions in history exists in open revisions which got at step 4.
    """
    # Setup
    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    no_conflicts_enabled = params_from_base_test_setup["no_conflicts_enabled"]
    mode = params_from_base_test_setup["mode"]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sync_gateway_version = params_from_base_test_setup["sync_gateway_version"]
    sg_db = "db"

    # NOTE(review): this is a lexicographic string comparison, so e.g. "10.0" < "2.0" is True.
    # Works for 1.x/2.x version strings but would misbehave for double-digit majors — TODO confirm
    # whether a version-parsing helper should be used here instead.
    if no_conflicts_enabled or sync_gateway_version < "2.0":
        pytest.skip('--no-conflicts is enabled and does not work with sg < 2.0 , so skipping the test')

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    # 1. Start sg with default(i.e allow_conflicts=true)
    c = cluster.Cluster(cluster_config)
    c.reset(sg_conf)

    sg_client = MobileRestClient()
    channels = ["no-conflicts"]
    sg_client.create_user(url=sg_admin_url, db=sg_db, name='autotest', password='******', channels=channels)
    autouser_session = sg_client.create_session(url=sg_admin_url, db=sg_db, name='autotest', password='******')
    # end of Set up

    # 2. Add docs to SG.
    sgdoc_bodies = document.create_docs(doc_id_prefix="sg_docs", number=num_of_docs, channels=channels)
    sg_docs = sg_client.add_bulk_docs(url=sg_url, db=sg_db, docs=sgdoc_bodies, auth=autouser_session)
    assert len(sgdoc_bodies) == num_of_docs

    # 3. Update the docs few times
    # prev_revs records the hash part of the first doc's rev after each update pass.
    prev_revs = []
    for i in xrange(5):
        update_sg_docs = sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=1, delay=None,
                                               auth=autouser_session, channels=channels)
        rev = update_sg_docs[0]['rev'].split('-')[1]
        prev_revs.append(rev)

    # 4. Create a conflicts and verify it is successful.
    for doc in sg_docs:
        conflicted_rev = sg_client.add_conflict(url=sg_url, db=sg_db, doc_id=doc["id"], parent_revisions=doc["rev"],
                                                new_revision="2-foo", auth=autouser_session)
        assert conflicted_rev["rev"] == "2-foo"

    # Capture the open revisions PER DOC. The previous implementation overwrote a single
    # variable on every iteration, so step 8 compared every doc's history against only the
    # LAST doc's open revisions.
    open_revs_by_doc = {}
    for doc in sg_docs:
        open_revs_by_doc[doc["id"]] = sg_client.get_open_revs_ids(url=sg_url, db=sg_db, doc_id=doc["id"],
                                                                  rev="2-foo", auth=autouser_session)
    time.sleep(5)

    # 5. Enable allow_conflicts = false in SG config and 6. restart sg
    revs_limit = 2
    temp_cluster_config = copy_to_temp_conf(cluster_config, mode)
    persist_cluster_config_environment_prop(temp_cluster_config, 'no_conflicts_enabled', "True",
                                            property_name_check=False)
    persist_cluster_config_environment_prop(temp_cluster_config, 'revs_limit', revs_limit, property_name_check=False)
    status = c.sync_gateways[0].restart(config=sg_conf, cluster_config=temp_cluster_config)
    assert status == 0, "Syncgateway did not start after no conflicts is enabled"
    sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=1, auth=autouser_session,
                          channels=channels)

    # 6. Delete doc by revision of current active open revision
    for doc in sg_docs:
        num_of_revs = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc["id"], auth=autouser_session)
        sg_client.delete_doc(url=sg_url, db=sg_db, doc_id=doc["id"], rev=num_of_revs["_rev"], auth=autouser_session)

    # 7.Verify all revisions in history exists in open revisions which got at step 4.
    for doc in sg_docs:
        num_of_revs_history = sg_client.get_revs_num_in_history(url=sg_url, db=sg_db, doc_id=doc["id"],
                                                                auth=autouser_session)
        for rev in num_of_revs_history:
            assert rev in open_revs_by_doc[doc["id"]], "Expected revision does not exist in revision history "