def test_setting_expiry_in_bulk_docs(params_from_base_test_setup, sg_conf_name):
    """
    1. PUT /db/_bulk_docs with 10 documents. Set the "_exp":3 on 5 of these documents
    2. Wait five seconds
    3. POST /db/_bulk_get for the 10 documents. Validate that only the 5 non-expiring documents are returned
    """
    # Resolve the Sync Gateway config for the current mode and reset the cluster
    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)
    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )
    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]
    log_info("Running 'test_setting_expiry_in_bulk_docs'")
    log_info("cbs_url: {}".format(cbs_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))
    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC", "ABC"]
    # Create a user and an authenticated session for the public API calls below
    client = MobileRestClient()
    client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)
    # 5 docs expiring after 3s, 5 docs expiring after 10s (ids: exp_3_0..4, exp_10_0..4)
    doc_exp_3_bodies = document.create_docs(doc_id_prefix="exp_3", number=5, expiry=3, channels=sg_user_channels)
    doc_exp_10_bodies = document.create_docs(doc_id_prefix="exp_10", number=5, expiry=10, channels=sg_user_channels)
    bulk_bodies = doc_exp_3_bodies + doc_exp_10_bodies
    bulk_docs = client.add_bulk_docs(url=sg_url, db=sg_db, docs=bulk_bodies, auth=sg_user_session)
    # Allow exp_3 docs to expire (3s TTL; 5s sleep leaves the exp_10 docs alive)
    time.sleep(5)
    bulk_get_docs = client.get_bulk_docs(url=sg_url, db=sg_db, docs=bulk_docs, auth=sg_user_session)
    expected_ids = ["exp_10_0", "exp_10_1", "exp_10_2", "exp_10_3", "exp_10_4"]
    expected_missing_ids = ["exp_3_0", "exp_3_1", "exp_3_2", "exp_3_3", "exp_3_4"]
    # Only the 10s-expiry docs should still be retrievable; the 3s-expiry docs must be gone
    client.verify_doc_ids_found_in_response(response=bulk_get_docs, expected_doc_ids=expected_ids)
    client.verify_doc_ids_not_found_in_response(response=bulk_get_docs, expected_missing_doc_ids=expected_missing_ids)
def test_peer_2_peer_sanity_push_one_shot_continuous(setup_p2p_test):
    """
    1. Create ls_db1 database on LiteServ One
    2. Create ls_db2 database on LiteServ Two
    3. Add 10000 docs to LiteServ 1 ls_db1
    4. Create one shot push replication LiteServ 1 ls_db1 -> LiteServ 2 ls_db2
    5. sleep for 5 seconds
    6. Create continuous push replication LiteServ 1 ls_db1 -> LiteServ 2 ls_db2
    7. Add 10000 docs to LiteServ 1 ls_db1
    8. Verify all docs replicate to LiteServ 2 ls_db2
    9. Verify all docs show up in changes for LiteServ 2 ls_db2
    """
    # NOTE(review): step 5 of the docstring ("sleep for 5 seconds") has no
    # corresponding time.sleep() in the body — confirm whether the docstring
    # or the implementation is out of date.
    ls_url_one = setup_p2p_test["ls_url_one"]
    ls_url_two = setup_p2p_test["ls_url_two"]
    num_docs_per_db = 10000
    log_info("ls_url_one: {}".format(ls_url_one))
    log_info("ls_url_two: {}".format(ls_url_two))
    client = MobileRestClient()
    log_info("Creating databases")
    ls_db1 = client.create_database(url=ls_url_one, name="ls_db1")
    ls_db2 = client.create_database(url=ls_url_two, name="ls_db2")
    # Seed ls_db1 before kicking off the one-shot replication
    bulk_docs = create_docs("test_ls_db1_oneshot", num_docs_per_db)
    ls_db1_docs_oneshot = client.add_bulk_docs(ls_url_one, ls_db1, bulk_docs)
    assert len(ls_db1_docs_oneshot) == num_docs_per_db
    # Setup one shot push replication from LiteServ 1 ls_db1 to LiteServ 2 ls_db2
    log_info("Setting up a one-shot push replication from ls_db1 to ls_db2")
    push_repl = client.start_replication(
        url=ls_url_one, continuous=False,
        from_db=ls_db1,
        to_url=ls_url_two, to_db=ls_db2,
    )
    log_info("Replication ID: {}".format(push_repl))
    # The seeded docs must have been pushed to ls_db2 and appear in its changes feed
    client.verify_docs_present(url=ls_url_two, db=ls_db2, expected_docs=ls_db1_docs_oneshot)
    client.verify_docs_in_changes(url=ls_url_two, db=ls_db2, expected_docs=ls_db1_docs_oneshot)
    # Setup continuous push replication from LiteServ 1 ls_db1 to LiteServ 2 ls_db2
    log_info("Setting up a continuous push replication from ls_db1 to ls_db2")
    push_repl = client.start_replication(
        url=ls_url_one, continuous=True,
        from_db=ls_db1,
        to_url=ls_url_two, to_db=ls_db2,
    )
    log_info("Replication ID: {}".format(push_repl))
    # Add a second batch while the continuous replication is running
    ls_db1_docs = client.add_docs(url=ls_url_one, db=ls_db1,
                                  number=num_docs_per_db, id_prefix="test_ls_db1")
    assert len(ls_db1_docs) == num_docs_per_db
    # Both batches (one-shot seed + continuous batch) must be present on ls_db2
    client.verify_docs_present(url=ls_url_two, db=ls_db2, expected_docs=ls_db1_docs_oneshot + ls_db1_docs)
    client.verify_docs_in_changes(url=ls_url_two, db=ls_db2, expected_docs=ls_db1_docs_oneshot + ls_db1_docs)
def test_view_query_performance(setup_client_syncgateway_test):
    """
    @summary
    Run this test when new iOS version arrives to make sure CBL query performance is not diminishing
    1. Add 100000 docs to the client with content
    2. Create design doc version 1 to fetch doc._id, doc._rev for docs with content
    3. Update docs 3 times which gets revision number 4-
    4. Run a query and check for 100000 expected docs with design doc version 1
    5. Verify view query finished in less than 5 seconds
    """
    # Fixed: the log line previously named the wrong test ('test_design_doc_update')
    log_info("Running 'test_view_query_performance'")
    ls_url = setup_client_syncgateway_test["ls_url"]
    log_info("ls_url: {}".format(ls_url))

    client = MobileRestClient()
    num_content_docs_per_db = 100000
    d_doc_name = "dd"
    ls_db = client.create_database(ls_url, name="ls_db")

    # 1. Add 100000 docs to the client with content
    bulk_docs_content = create_docs("doc_content_", num_content_docs_per_db, content={"hi": "I should be in the view"})
    ls_db_docs1 = client.add_bulk_docs(url=ls_url, db=ls_db, docs=bulk_docs_content)
    assert len(ls_db_docs1) == num_content_docs_per_db

    # 3. Update each doc 3 times so the current revisions are generation 4
    client.update_docs(url=ls_url, db=ls_db, docs=ls_db_docs1, number_updates=3, delay=0.1)

    # 2. Design doc to fetch doc._id, doc._rev for docs with content
    view = """{
    "language" : "javascript",
    "views" : {
        "content_view" : {
            "map" : "function(doc, meta) { if (doc.content) { emit(doc._id, doc._rev); } }"
        }
    }
}"""
    client.add_design_doc(url=ls_url, db=ls_db, name=d_doc_name, doc=view)

    # 4./5. Query the view and enforce the 5 second performance budget
    start = time.time()
    content_view_rows_1 = client.get_view(url=ls_url, db=ls_db, design_doc_name=d_doc_name, view_name="content_view")
    finish = time.time()
    assert finish - start < 5

    # Every doc has content, so every doc must appear in the view.
    # Fixed: this was hard-coded to 10000 — a 10x mismatch against the
    # num_content_docs_per_db (100000) docs added and asserted above.
    client.verify_view_row_num(view_response=content_view_rows_1, expected_num_rows=num_content_docs_per_db)
def test_peer_2_peer_sanity_pull(setup_p2p_test, num_docs_per_db, seeded_db, attachments_generator):
    """
    1. Create ls_db1 database on LiteServ One
    2. Create ls_db2 database on LiteServ Two
    3. Create continuous pull replication LiteServ 1 ls_db1 <- LiteServ 2 ls_db2
    4. Add 5000 docs to LiteServ 2 ls_db2
    5. Verify all docs replicate to LiteServ 1 ls_db1
    6. Verify all docs show up in changes for LiteServ 1 ls_db1
    """
    ls_url_one = setup_p2p_test["ls_url_one"]
    ls_url_two = setup_p2p_test["ls_url_two"]
    log_info("ls_url_one: {}".format(ls_url_one))
    log_info("ls_url_two: {}".format(ls_url_two))
    # Attachment verification is only enabled when a generator is supplied
    attachments = False
    if attachments_generator:
        log_info("Running test_peer_2_peer_sanity_pull with attachment {}".format(attachments_generator))
        attachments = True
    client = MobileRestClient()
    log_info("Creating databases")
    ls_db1 = client.create_database(url=ls_url_one, name="ls_db1")
    ls_db2 = client.create_database(url=ls_url_two, name="ls_db2")
    # Optionally pre-populate the source db before the replication starts
    if seeded_db:
        bulk_docs = create_docs("test_ls_db2_seed", num_docs_per_db)
        ls_db2_docs_seed = client.add_bulk_docs(url=ls_url_two, db=ls_db2, docs=bulk_docs)
        assert len(ls_db2_docs_seed) == num_docs_per_db
    # Setup continuous pull replication from LiteServ 2 ls_db2 to LiteServ 1 ls_db1
    pull_repl = client.start_replication(
        url=ls_url_one, continuous=True,
        from_url=ls_url_two, from_db=ls_db2,
        to_db=ls_db1
    )
    # Wait until any seed docs have been pulled before adding the live batch
    client.wait_for_replication_status_idle(url=ls_url_one, replication_id=pull_repl)
    ls_db2_docs = client.add_docs(url=ls_url_two, db=ls_db2, number=num_docs_per_db, id_prefix="test_ls_db2",
                                  attachments_generator=attachments_generator)
    assert len(ls_db2_docs) == num_docs_per_db
    client.verify_docs_present(url=ls_url_one, db=ls_db1, expected_docs=ls_db2_docs, attachments=attachments)
    client.verify_docs_in_changes(url=ls_url_one, db=ls_db1, expected_docs=ls_db2_docs)
    # NOTE(review): `+=` below extends ls_db2_docs in place via aliasing
    # (total_ls_db2_docs is the same list object); harmless here since
    # ls_db2_docs is not used again, but worth confirming intent.
    total_ls_db2_docs = ls_db2_docs
    if seeded_db:
        total_ls_db2_docs += ls_db2_docs_seed
    # Re-verify with the combined (seed + live) doc set
    client.verify_docs_present(url=ls_url_one, db=ls_db1, expected_docs=total_ls_db2_docs, attachments=attachments)
    client.verify_docs_in_changes(url=ls_url_one, db=ls_db1, expected_docs=total_ls_db2_docs)
def test_no_conflicts_enabled(params_from_base_test_setup, sg_conf_name, num_of_docs):
    """ @summary : Enable no conflicts and verify conflicts are not created
    Test case link : https://docs.google.com/spreadsheets/d/1YwI_gCeoBebQKBybkzoAEoXSc0XLReszDA-mPFQapgk/edit#gid=0
    covered #3
    Steps:
    1. Enable allow_conflicts = false in SG config
    2. Add docs to SG.
    3. Update the docs few times.
    4. Try to create a conflict.
    5. Check the revision list for the doc
    6. Verify no conflicts can be created.
    7. Sync Gateway should respond with a 409 upon trying to add a conflict
    8. Should be possible to add more than 1 revision (Default revs_limit = 1000)
    """
    # Setup
    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    no_conflicts_enabled = params_from_base_test_setup["no_conflicts_enabled"]
    sg_db = "db"
    # This test only makes sense when the suite runs with --no-conflicts
    if not no_conflicts_enabled:
        pytest.skip('--no-conflicts is not enabled, so skipping the test')
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    c = cluster.Cluster(cluster_config)
    c.reset(sg_conf)
    sg_client = MobileRestClient()
    channels = ["no-conflicts"]
    sg_client.create_user(url=sg_admin_url, db=sg_db, name='autotest', password='******', channels=channels)
    autouser_session = sg_client.create_session(url=sg_admin_url, db=sg_db, name='autotest', password='******')
    # end of Set up

    # 2. Add docs to SG.
    sgdoc_bodies = document.create_docs(doc_id_prefix='sg_docs', number=num_of_docs,
                                        attachments_generator=attachment.generate_2_png_10_10, channels=channels)
    sg_docs = sg_client.add_bulk_docs(url=sg_url, db=sg_db, docs=sgdoc_bodies, auth=autouser_session)
    assert len(sgdoc_bodies) == num_of_docs

    # 3. Update the docs few times
    sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=3, delay=None, auth=autouser_session, channels=channels)

    # 4.-7. Try to create a conflict on every doc; Sync Gateway must reject each with a 409
    for doc in sg_docs:
        with pytest.raises(HTTPError) as he:
            sg_client.add_conflict(url=sg_url, db=sg_db, doc_id=doc["id"], parent_revisions=doc["rev"], new_revision="2-foo",
                                   auth=autouser_session)
        # NOTE: `.message` on the exception value implies this suite targets Python 2
        assert he.value.message.startswith('409 Client Error: Conflict for url:')

    # 8. Verify a plain (non-conflicting) revision can still be added after the 409s
    sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=1, delay=None, auth=autouser_session, channels=channels)
def test_revs_cache_size(params_from_base_test_setup, sg_conf_name, num_of_docs):
    """
    @summary Test for no-conflicts with rev_cache size
    Test case link : https://docs.google.com/spreadsheets/d/1YwI_gCeoBebQKBybkzoAEoXSc0XLReszDA-mPFQapgk/edit#gid=0
    covered #18
    Steps:
    Note : the sg config have rev_cache_size as 1000 , make sure number of docs is less than 1000 to have the test
    work with expected behavior
    1. Add docs to SG.
    2. Get the docs
    3. Verify number of rev_cache_hits is same as number of docs if rev_cache_size is more than number of docs.
    """
    # Setup
    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    # Only half the docs are fetched, so only half should register cache hits.
    # Explicit floor division keeps this an int on both Python 2 and Python 3
    # (plain `/` would yield a float under Py3 and break range() below).
    retrieved_docs = num_of_docs // 2
    sync_gateway_version = params_from_base_test_setup["sync_gateway_version"]
    if sync_gateway_version < "2.0":
        pytest.skip('It does not work with sg < 2.0 , so skipping the test')
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    c = cluster.Cluster(cluster_config)
    c.reset(sg_conf)
    sg_client = MobileRestClient()
    channels = ["no-conflicts"]
    sg_client.create_user(url=sg_admin_url, db=sg_db, name='autotest', password='******', channels=channels)
    autouser_session = sg_client.create_session(url=sg_admin_url, db=sg_db, name='autotest', password='******')
    # end of Set up

    # 1. Add docs to SG.
    sgdoc_bodies = document.create_docs(doc_id_prefix="sg_docs", number=num_of_docs, channels=channels)
    sg_docs = sg_client.add_bulk_docs(url=sg_url, db=sg_db, docs=sgdoc_bodies, auth=autouser_session)
    assert len(sgdoc_bodies) == num_of_docs

    # 2. Get half of the docs; each fetch should be served from the revision cache
    for i in range(retrieved_docs):
        doc = sg_docs[i]
        sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc["id"], auth=autouser_session)

    # 3. Verify the number of cache hits matches the number of retrieved docs
    exp_vars = sg_client.get_expvars(url=sg_admin_url)
    revision_cache_hits = exp_vars["syncGateway_stats"]["revisionCache_hits"]
    revision_cache_misses = exp_vars["syncGateway_stats"]["revisionCache_misses"]
    # Fixed: the failure message previously formatted num_of_docs although the
    # expectation is retrieved_docs — a failing run would report the wrong number.
    assert revision_cache_hits == retrieved_docs, "Revision Cache hits did not hit with expected number {}".format(retrieved_docs)
    assert revision_cache_misses == 0, "Revision Cache misses is not 0"
def test_peer_2_peer_sanity_pull_one_shot(setup_p2p_test):
    """
    1. Create ls_db1 database on LiteServ One
    2. Create ls_db2 database on LiteServ Two
    3. Add 10000 docs to LiteServ 2 ls_db2
    4. Create one shot pull replication LiteServ 1 ls_db1 <- LiteServ 2 ls_db2
    5. Verify all docs replicate to LiteServ 1 ls_db1
    6. Verify all docs show up in changes for LiteServ 1 ls_db1
    """
    doc_count = 10000
    url_one = setup_p2p_test["ls_url_one"]
    url_two = setup_p2p_test["ls_url_two"]
    log_info("ls_url_one: {}".format(url_one))
    log_info("ls_url_two: {}".format(url_two))

    rest_client = MobileRestClient()

    # Target (pulling) db lives on LiteServ One, source db on LiteServ Two
    log_info("Creating databases")
    target_db = rest_client.create_database(url=url_one, name="ls_db1")
    source_db = rest_client.create_database(url=url_two, name="ls_db2")

    # Seed the source database before starting the replication
    source_doc_bodies = create_docs("test_ls_db2", doc_count)
    source_docs = rest_client.add_bulk_docs(url=url_two, db=source_db, docs=source_doc_bodies)
    assert len(source_docs) == doc_count

    # One-shot pull: LiteServ One pulls everything from ls_db2 into ls_db1
    log_info("Setting up a one-shot pull replication from ls_db2 to ls_db1")
    replication_id = rest_client.start_replication(
        url=url_one,
        continuous=False,
        from_url=url_two,
        from_db=source_db,
        to_db=target_db
    )
    log_info("Replication ID: {}".format(replication_id))

    # Every source doc must land in the target db and appear in its changes feed
    rest_client.verify_docs_present(url=url_one, db=target_db, expected_docs=source_docs)
    rest_client.verify_docs_in_changes(url=url_one, db=target_db, expected_docs=source_docs)
def add_user_docs(client, sg_url, sg_db, user_name, user_auth, channels, number_docs_per_user, batch_size, create_delay):
    """Push docs to Sync Gateway for one user in batches of `batch_size`,
    sleeping `create_delay` seconds between batches.

    Returns the list of ids of all docs added.
    """
    pushed_ids = []

    # Filtered-channel users override the channel list based on their index:
    # even-indexed users write to ['even'], odd-indexed users to ['odd'].
    if user_name.startswith('filtered_channel_user'):
        # user_name looks like 'filtered_channel_user_2' -> index is the 4th part
        user_index = int(user_name.split('_')[3])
        channels = ['even'] if user_index % 2 == 0 else ['odd']

    total_pushed = 0
    batch_number = 0
    while total_pushed < number_docs_per_user:
        # Build one batch of 1k-property docs, ids prefixed '<user>-<batch>'
        batch_bodies = document.create_docs(
            doc_id_prefix='{}-{}'.format(user_name, batch_number),
            number=batch_size,
            prop_generator=document.doc_1k,
            channels=channels)

        log_info('User ({}) adding {} docs.'.format(user_name, number_docs_per_user))
        added = client.add_bulk_docs(sg_url, sg_db, batch_bodies, auth=user_auth)
        pushed_ids.extend(d['id'] for d in added)

        total_pushed += batch_size
        batch_number += 1

        # Throttle: wait 'create_delay' seconds before the next batch
        time.sleep(create_delay)

    return pushed_ids
def test_document_resurrection(params_from_base_test_setup, sg_conf_name, deletion_type):
    """
    Scenarios:

    Doc meta mode / tombstone
    - Create docs (set A) via Sync Gateway
    - Delete docs (set A) via Sync Gateway
    - Verify docs (set A) are deleted via Sync Gateway
    - Create docs (set A) via Sync Gateway
    - Verify revs (set A) are generation 3 via Sync Gateway

    Doc meta mode / purge
    - Create docs (set A) via Sync Gateway
    - Purge docs (set A) via Sync Gateway
    - Verify docs (set A) are deleted via Sync Gateway
    - Create docs (set A) via Sync Gateway
    - Verify revs (set A) are generation 1 via Sync Gateway

    XATTRs / tombstone
    - Create docs (set A) via Sync Gateway
    - Create docs (set B) via SDK
    - Delete SDK docs (set B) via Sync Gateway
    - Delete SG docs (set A) via SDK
    - Verify docs (set B) are deleted via Sync Gateway
    - Verify docs (set B) are deleted via SDK
    - Verify docs (set A) are deleted via Sync Gateway
    - Verify docs (set A) are deleted via SDK
    - Create docs (set A) via Sync Gateway
    - Create docs (set B) via SDK
    - Verify revs (set A, B) are generation 3 via Sync Gateway

    XATTRs / purge
    - Create docs (set A) via Sync Gateway
    - Create docs (set B) via SDK
    - Purge SDK docs (set B) via Sync Gateway
    - Delete SG docs (set A) via SDK
    - Verify docs (set B) are deleted via Sync Gateway
    - Verify docs (set B) are deleted via SDK
    - Verify docs (set A) are deleted via Sync Gateway
    - Verify docs (set A) are deleted via SDK
    - Create docs (set A) via Sync Gateway
    - Create docs (set B) via SDK
    - Verify revs (set A, B) are generation 1 via Sync Gateway
    """
    cluster_conf = params_from_base_test_setup['cluster_config']
    cluster_topology = params_from_base_test_setup['cluster_topology']
    mode = params_from_base_test_setup['mode']
    xattrs_enabled = params_from_base_test_setup['xattrs_enabled']
    cbs_url = cluster_topology['couchbase_servers'][0]
    sg_admin_url = cluster_topology['sync_gateways'][0]['admin']
    sg_url = cluster_topology['sync_gateways'][0]['public']
    bucket_name = 'data-bucket'
    sg_db = 'db'
    cbs_host = host_for_url(cbs_url)
    num_docs_per_client = 10
    # Reset cluster
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)
    # Initialize clients: one REST client for SG, one direct SDK bucket connection
    sg_client = MobileRestClient()
    sdk_client = Bucket('couchbase://{}/{}'.format(cbs_host, bucket_name), password='******')
    # Create Sync Gateway user
    sg_user_channels = ['NASA', 'NATGEO']
    sg_client.create_user(url=sg_admin_url, db=sg_db, name='seth', password='******', channels=sg_user_channels)
    sg_user_auth = sg_client.create_session(url=sg_admin_url, db=sg_db, name='seth', password='******')
    # Create / Add docs from SG (set A)
    sg_doc_bodies = document.create_docs(
        doc_id_prefix='sg_doc',
        number=num_docs_per_client,
        content={'foo': 'bar'},
        channels=sg_user_channels,
        attachments_generator=attachment.generate_2_png_10_10
    )
    sg_doc_ids = [doc['_id'] for doc in sg_doc_bodies]
    sg_bulk_docs_resp = sg_client.add_bulk_docs(url=sg_url, db=sg_db, docs=sg_doc_bodies, auth=sg_user_auth)
    assert len(sg_bulk_docs_resp) == num_docs_per_client
    all_doc_ids = sg_doc_ids
    assert len(all_doc_ids) == num_docs_per_client
    if xattrs_enabled:
        # Create / Add docs from sdk (set B) — only meaningful with shared xattr metadata
        log_info('Adding docs via SDK')
        sdk_doc_bodies = document.create_docs(
            doc_id_prefix='sdk_doc',
            number=num_docs_per_client,
            content={'foo': 'bar'},
            channels=sg_user_channels,
        )
        sdk_docs = {doc['_id']: doc for doc in sdk_doc_bodies}
        sdk_doc_ids = [doc['_id'] for doc in sdk_doc_bodies]
        log_info('Creating SDK docs')
        sdk_client.upsert_multi(sdk_docs)
        all_doc_ids = sg_doc_ids + sdk_doc_ids
        assert len(all_doc_ids) == num_docs_per_client * 2
    if deletion_type == 'tombstone':
        # Set the target docs.
        # Doc meta mode: Delete Sync Gateway docs via Sync Gateway
        # XATTR mode: Delete SDK docs via Sync Gateway
        sg_doc_ids_to_delete = sg_doc_ids
        if xattrs_enabled:
            sg_doc_ids_to_delete = sdk_doc_ids
        # SG delete target docs (fetch current rev first, then delete that rev)
        for doc_id in sg_doc_ids_to_delete:
            doc = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id, auth=sg_user_auth)
            deleted = sg_client.delete_doc(url=sg_url, db=sg_db, doc_id=doc_id, rev=doc['_rev'], auth=sg_user_auth)
            log_info(deleted)
        if xattrs_enabled:
            # Cross-delete: the SG-created docs are removed via the SDK
            log_info('Deleting SG docs via SDK')
            sdk_client.remove_multi(sg_doc_ids)
    elif deletion_type == 'purge':
        # SG Purge all docs (purge requires the doc bodies, so fetch them first)
        all_docs, errors = sg_client.get_bulk_docs(url=sg_url, db=sg_db, doc_ids=all_doc_ids, auth=sg_user_auth)
        if xattrs_enabled:
            assert len(all_docs) == num_docs_per_client * 2
            assert len(errors) == 0
        else:
            assert len(all_docs) == num_docs_per_client
            assert len(errors) == 0
        log_info('Purging docs via Sync Gateway')
        sg_client.purge_docs(url=sg_admin_url, db=sg_db, docs=all_docs)
    else:
        raise ValueError('Invalid test parameters')
    # Verify deletes via Sync Gateway
    deleted_docs_to_verify = sg_doc_ids
    assert len(deleted_docs_to_verify) == num_docs_per_client
    # If running is xattr mode, make sure to verify SG + SDK docs
    if xattrs_enabled:
        deleted_docs_to_verify = sg_doc_ids + sdk_doc_ids
        assert len(deleted_docs_to_verify) == num_docs_per_client * 2
    if xattrs_enabled and deletion_type == 'tombstone':
        # Verify SDK + SG docs are deleted from Sync Gateway
        verify_sg_deletes(sg_client, sg_url, sg_db, deleted_docs_to_verify, sg_user_auth)
        # Verify SDK + SG docs are deleted from SDK
        verify_sdk_deletes(sdk_client, deleted_docs_to_verify)
    elif xattrs_enabled and deletion_type == 'purge':
        # Verify SDK + SG docs are purged from Sync Gateway
        verify_sg_purges(sg_client, sg_url, sg_db, deleted_docs_to_verify, sg_user_auth)
        # Verify SDK + SG docs are deleted from SDK
        verify_sdk_deletes(sdk_client, deleted_docs_to_verify)
    elif not xattrs_enabled and deletion_type == 'tombstone':
        # Doc meta: Verify SG docs are all deleted via SG
        verify_sg_deletes(sg_client, sg_url, sg_db, deleted_docs_to_verify, sg_user_auth)
    elif not xattrs_enabled and deletion_type == 'purge':
        # Doc meta: Verify SG docs are all deleted via SG
        verify_sg_purges(sg_client, sg_url, sg_db, deleted_docs_to_verify, sg_user_auth)
    else:
        raise ValueError('Invalid test parameters')
    # Recreate deleted docs from Sync Gateway (the "resurrection" under test)
    sg_bulk_docs_resp = sg_client.add_bulk_docs(url=sg_url, db=sg_db, docs=sg_doc_bodies, auth=sg_user_auth)
    assert len(sg_bulk_docs_resp) == num_docs_per_client
    if xattrs_enabled:
        log_info('Recreating SDK docs')
        # Recreate deleted docs from SDK
        sdk_client.upsert_multi(sdk_docs)
    # Get docs via Sync Gateway
    doc_ids_to_get = sg_doc_ids
    if xattrs_enabled:
        doc_ids_to_get = sg_doc_ids + sdk_doc_ids
    docs, errors = sg_client.get_bulk_docs(
        url=sg_url,
        db=sg_db,
        doc_ids=doc_ids_to_get,
        auth=sg_user_auth,
        validate=False
    )
    if xattrs_enabled:
        assert len(docs) == num_docs_per_client * 2
        assert len(errors) == 0
    else:
        assert len(docs) == num_docs_per_client
        assert len(errors) == 0
    if xattrs_enabled:
        # Get SDK docs and makes sure all docs were recreated
        all_docs_from_sdk = sdk_client.get_multi(doc_ids_to_get)
        assert len(all_docs_from_sdk) == num_docs_per_client * 2
        log_info('Found: {} recreated docs via SDK'.format(len(all_docs_from_sdk)))
        # Make sure we are able to get recreated docs via SDK
        # (remove each found id from a scratch copy; an empty remainder means full coverage)
        doc_ids_to_get_scratch = list(doc_ids_to_get)
        assert len(doc_ids_to_get_scratch) == num_docs_per_client * 2
        for doc_id in all_docs_from_sdk:
            doc_ids_to_get_scratch.remove(doc_id)
        assert len(doc_ids_to_get_scratch) == 0
    # Make sure we are able to get recreated docs via SG (same scratch-list technique)
    doc_ids_to_get_scratch = list(doc_ids_to_get)
    if xattrs_enabled:
        # SG + SDK docs
        assert len(doc_ids_to_get_scratch) == num_docs_per_client * 2
    else:
        # SG docs
        assert len(doc_ids_to_get_scratch) == num_docs_per_client
    for doc in docs:
        # Verify expected document revisions
        if xattrs_enabled:
            if deletion_type == 'purge':
                # SG purges SG docs and recreates them, expecting 1- rev
                # SDK removes SDK docs and recreates them, expecting 1- rev
                assert doc['_rev'].startswith('1-')
            else:
                # SG tombstones SG docs and recreates them, expecting 3- rev
                # SDK removes SDK docs and recreates them, expecting 1- rev
                if doc['_id'].startswith('sg_'):
                    assert doc['_rev'].startswith('3-')
                else:
                    assert doc['_rev'].startswith('1-')
        else:
            if deletion_type == 'purge':
                # SG purges SG docs and recreates them, expecting 1- rev
                assert doc['_rev'].startswith('1-')
            else:
                # SG tombstones SG docs and recreates them, expecting 3- rev
                assert doc['_rev'].startswith('3-')
        doc_ids_to_get_scratch.remove(doc['_id'])
    # Make sure all docs were found
    assert len(doc_ids_to_get_scratch) == 0
def test_conflicts_sg_accel_added(params_from_base_test_setup, sg_conf_name, num_of_docs, revs_limit, additional_updates):
    """
    @summary Verify no conflicts feature works with sg accel down
    Test case link : https://docs.google.com/spreadsheets/d/1YwI_gCeoBebQKBybkzoAEoXSc0XLReszDA-mPFQapgk/edit#gid=0
    covered #12, #16, #21
    Steps:
    1. Enable allow_conflicts = false in SG config with revs_limit
    2. Add docs to SG.
    3. Update the docs few times and get all revisions of updates
    4. Get number of revisions and verify length is equal to revs_limit set to
    5. Start sg accel
    6. Update the docs with few updates
    7. Get number of revisions and verify number of revisions is equvalent to revs_limit set to
    """
    # Setup
    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    no_conflicts_enabled = params_from_base_test_setup["no_conflicts_enabled"]
    sg_db = "db"
    # Update revs_limit + additional_updates times so the oldest revisions get pruned
    total_updates = revs_limit + additional_updates
    new_updates = 2
    # Requires no-conflicts mode and a distributed-index (sg_accel) deployment
    if not no_conflicts_enabled or mode != "di":
        pytest.skip('--no-conflicts is not enabled or mode is not di, so skipping the test')
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    c = cluster.Cluster(cluster_config)
    c.reset(sg_conf)
    # Run the first phase with the accel node down
    c.sg_accels[0].stop()
    sg_client = MobileRestClient()
    channels = ["no-conflicts"]
    sg_client.create_user(url=sg_admin_url, db=sg_db, name='autotest', password='******', channels=channels)
    autouser_session = sg_client.create_session(url=sg_admin_url, db=sg_db, name='autotest', password='******')
    # end of Set up

    # 1. Enable allow_conflicts = false in SG config with revs_limit
    temp_cluster_config = copy_to_temp_conf(cluster_config, mode)
    persist_cluster_config_environment_prop(temp_cluster_config, 'revs_limit', revs_limit, property_name_check=False)
    status = c.sync_gateways[0].restart(config=sg_conf, cluster_config=temp_cluster_config)
    assert status == 0, "Syncgateway did not start after having revs_limit with no conflicts mode"

    # 2. Add docs to SG.
    sgdoc_bodies = document.create_docs(doc_id_prefix="sg_docs", number=num_of_docs, channels=channels)
    sg_docs = sg_client.add_bulk_docs(url=sg_url, db=sg_db, docs=sgdoc_bodies, auth=autouser_session)
    assert len(sgdoc_bodies) == num_of_docs

    # 3. Update the docs few times and get all revisions of updates
    # (record the generation suffix of the first doc's rev after each round)
    prev_revs = []
    for i in xrange(total_updates):
        update_sg_docs = sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=1, delay=None,
                                               auth=autouser_session, channels=channels)
        rev = update_sg_docs[0]['rev'].split('-')[1]
        prev_revs.append(rev)

    # 4. Get number of revisions and verify length is equal to revs_limit set to;
    # the earliest `additional_updates` revisions should already be pruned from history
    for doc in sg_docs:
        num_of_revs = sg_client.get_revs_num_in_history(url=sg_url, db=sg_db, doc_id=doc["id"], auth=autouser_session)
        assert len(num_of_revs) == revs_limit, "Number of revisions in history is more than revs_limit set in sg config"
        for i in xrange(additional_updates):
            assert prev_revs[i] not in num_of_revs

    # 5. Start sg accel
    status = c.sg_accels[0].start(config=sg_conf)
    assert status == 0, "sync_gateway accel did not start"

    # 6. Update the docs with few updates
    sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=new_updates, delay=None, auth=autouser_session, channels=channels)

    # 7. Get number of revisions and verify number of revisions is equvalent to revs_limit set to;
    # after the extra updates, even more of the recorded early revisions must be pruned
    for doc in sg_docs:
        num_of_revs = sg_client.get_revs_num_in_history(url=sg_url, db=sg_db, doc_id=doc["id"], auth=autouser_session)
        assert len(num_of_revs) == revs_limit, "Number of revisions in history is more than revs_limit set in sg config"
        for i in xrange(additional_updates + new_updates):
            assert prev_revs[i] not in num_of_revs
def test_user_views_sanity(params_from_base_test_setup, sg_conf_name):
    """Sanity-check Sync Gateway principal views (_user / _role) and
    per-user filtered design-doc views.

    Verifies users/roles defined in the SG config, pushes 100 attachment-bearing
    docs per channel as two different users, checks the attachments land in
    Couchbase Server, then queries two views as each user and validates that
    each user only sees rows from their own channels.
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    log_info("Running 'single_user_multiple_channels'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("conf: {}".format(sg_conf))
    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)
    sg_db = "db"
    number_docs_per_channel = 100
    topology = params_from_base_test_setup["cluster_topology"]
    cbs_url = topology["couchbase_servers"][0]
    bucket = "data-bucket"
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_public_url = topology["sync_gateways"][0]["public"]
    client = MobileRestClient()
    # These are defined in the sg config
    # Scientist role has channels ["Download"]
    # Researcher role has channels ["Upload"]
    # "seth" has "Scientist" role and ["Create"] channel
    # "raghu" has "Researcher" role and ["Edit"] channel
    # Issue GET /_user to exercise principal views
    users = client.get_users(url=sg_admin_url, db=sg_db)
    # These are defined in the config
    assert len(users) == 2 and "seth" in users and "raghu" in users
    # Issue GET /_role to exercise principal views
    roles = client.get_roles(url=sg_admin_url, db=sg_db)
    # These are defined in the config
    assert len(roles) == 2 and "Scientist" in roles and "Researcher" in roles
    # Verify channels on each role ("!" is the public channel added by SG)
    scientist_role = client.get_role(url=sg_admin_url, db=sg_db, name="Scientist")
    researcher_role = client.get_role(url=sg_admin_url, db=sg_db, name="Researcher")
    assert len(scientist_role["all_channels"]) == 2
    assert "Download" in scientist_role["all_channels"]
    assert "!" in scientist_role["all_channels"]
    assert scientist_role["name"] == "Scientist"
    assert len(scientist_role["admin_channels"]) == 1 and "Download" in scientist_role["admin_channels"]
    assert len(researcher_role["all_channels"]) == 2
    assert "Upload" in researcher_role["all_channels"]
    assert "!" in researcher_role["all_channels"]
    assert researcher_role["name"] == "Researcher"
    assert len(researcher_role["admin_channels"]) == 1 and "Upload" in researcher_role["admin_channels"]
    # Verify roles are assigned to the user
    seth_user = client.get_user(url=sg_admin_url, db=sg_db, name="seth")
    raghu_user = client.get_user(url=sg_admin_url, db=sg_db, name="raghu")
    assert len(seth_user["all_channels"]) == 3
    assert "!" in seth_user["all_channels"] and "Create" in seth_user[
        "all_channels"] and "Download" in seth_user["all_channels"]
    assert seth_user["admin_roles"] == ["Scientist"]
    assert seth_user["roles"] == ["Scientist"]
    assert len(raghu_user["all_channels"]) == 3
    assert "!" in raghu_user["all_channels"] and "Edit" in raghu_user[
        "all_channels"] and "Upload" in raghu_user["all_channels"]
    assert raghu_user["admin_roles"] == ["Researcher"]
    assert raghu_user["roles"] == ["Researcher"]
    seth_session = client.create_session(url=sg_admin_url, db=sg_db, name="seth", password="******")
    raghu_session = client.create_session(url=sg_admin_url, db=sg_db, name="raghu", password="******")
    # Build 100 docs per channel; Download/Create docs carry 2 attachments each,
    # Upload/Edit docs carry 1 attachment each
    start = time.time()
    download_doc_bodies = document.create_docs(
        doc_id_prefix="download_doc",
        number=number_docs_per_channel,
        attachments_generator=attachment.generate_2_png_100_100,
        channels=["Download"])
    upload_doc_bodies = document.create_docs(
        doc_id_prefix="upload_doc",
        number=number_docs_per_channel,
        attachments_generator=attachment.generate_png_100_100,
        channels=["Upload"])
    create_doc_bodies = document.create_docs(
        doc_id_prefix="create_doc",
        number=number_docs_per_channel,
        attachments_generator=attachment.generate_2_png_100_100,
        channels=["Create"])
    edit_doc_bodies = document.create_docs(
        doc_id_prefix="edit_doc",
        number=number_docs_per_channel,
        attachments_generator=attachment.generate_png_100_100,
        channels=["Edit"])
    end = time.time() - start
    log_info("Time to create docs: {}s".format(end))
    # Each user pushes only to channels they can write to
    download_docs = client.add_bulk_docs(url=sg_public_url, db=sg_db, docs=download_doc_bodies, auth=seth_session)
    assert len(download_docs) == number_docs_per_channel
    upload_docs = client.add_bulk_docs(url=sg_public_url, db=sg_db, docs=upload_doc_bodies, auth=raghu_session)
    assert len(upload_docs) == number_docs_per_channel
    create_docs = client.add_bulk_docs(url=sg_public_url, db=sg_db, docs=create_doc_bodies, auth=seth_session)
    assert len(create_docs) == number_docs_per_channel
    edit_docs = client.add_bulk_docs(url=sg_public_url, db=sg_db, docs=edit_doc_bodies, auth=raghu_session)
    assert len(edit_docs) == number_docs_per_channel
    # Assert that the attachment docs gets written to couchbase server
    server = couchbaseserver.CouchbaseServer(cbs_url)
    server_att_docs = server.get_server_docs_with_prefix(bucket=bucket, prefix="_sync:att:")
    # 2 per Download doc + 1 per Upload doc + 2 per Create doc + 1 per Edit doc
    expected_num_attachments = (number_docs_per_channel * 2) + \
        number_docs_per_channel + \
        (number_docs_per_channel * 2) + \
        number_docs_per_channel
    assert len(server_att_docs) == expected_num_attachments
    # "filtered" emits every doc; "filtered_more" emits only Create/Edit channel docs
    design_doc = {
        "views": {
            "filtered": {
                "map": 'function(doc, meta) {emit(meta._id, doc.channels);}'
            },
            "filtered_more": {
                "map": 'function(doc, meta) { if (doc.channels.indexOf("Create") != -1 || doc.channels.indexOf("Edit") != -1) {emit(meta._id, doc.channels);}}'
            }
        }
    }
    client.add_design_doc(url=sg_admin_url, db=sg_db, name="test_views", doc=json.dumps(design_doc))
    # "seth" should see docs for channels ["Create", "Download"]
    seth_filtered = client.get_view(url=sg_public_url, db=sg_db, design_doc_name="test_views", view_name="filtered", auth=seth_session)
    seth_filtered_rows = seth_filtered["rows"]
    validate_rows(rows=seth_filtered_rows,
                  num_expected_rows=2 * number_docs_per_channel,
                  expected_id_prefixes=["create_doc", "download_doc"],
                  number_of_prefixed_docs=number_docs_per_channel)
    # "seth" should only see docs with "Create" channel through this view
    seth_filtered_more = client.get_view(url=sg_public_url, db=sg_db, design_doc_name="test_views", view_name="filtered_more", auth=seth_session)
    seth_filtered_more_rows = seth_filtered_more["rows"]
    validate_rows(rows=seth_filtered_more_rows,
                  num_expected_rows=number_docs_per_channel,
                  expected_id_prefixes=["create_doc"],
                  number_of_prefixed_docs=number_docs_per_channel)
    # "raghu" should see docs for channels ["Upload", "Edit"]
    raghu_filtered = client.get_view(url=sg_public_url, db=sg_db, design_doc_name="test_views", view_name="filtered", auth=raghu_session)
    raghu_rows = raghu_filtered["rows"]
    validate_rows(rows=raghu_rows,
                  num_expected_rows=2 * number_docs_per_channel,
                  expected_id_prefixes=["upload_doc", "edit_doc"],
                  number_of_prefixed_docs=number_docs_per_channel)
    # "raghu" should only see docs with "Edit" channel through this view
    raghu_filtered_more = client.get_view(url=sg_public_url, db=sg_db, design_doc_name="test_views", view_name="filtered_more", auth=raghu_session)
    raghu_filtered_more_rows = raghu_filtered_more["rows"]
    validate_rows(rows=raghu_filtered_more_rows,
                  num_expected_rows=number_docs_per_channel,
                  expected_id_prefixes=["edit_doc"],
                  number_of_prefixed_docs=number_docs_per_channel)
def test_take_all_sgaccels_down(params_from_base_test_setup, sg_conf):
    """
    Scenario that takes all sync_gateway accel nodes offline during doc load.
    After bringing the nodes back online during load, the reshard of the DCP feed is verified.
    The changes feed is verified that all docs show up.

    1. Start doc load (1000 doc)
    2. Take all sg_accel nodes down in parallel
    3. Verify node are down
    4. Wait for doc adds to complete, store "doc_push_result_1"
    5. Verify "doc_push_result_1" docs added
    6. Start doc load (1000 docs)
    7. Wait for 5. to complete, store "doc_push_result_2"
    8. Verify "doc_push_result_2" docs added
    9. Start another doc load (1000 docs)
    10. Bring up nodes in parallel
    11. poll on p-index reshard
    12. Wait for 9. to complete, store "doc_push_result_3"
    13. Verify "doc_push_result_3" docs added
    14. Verify "doc_push_result_1" + "doc_push_result_2" + "doc_push_result_3" show up in _changes feed
    """

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_dcp_reshard_single_sg_accel_goes_down_and_up'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))

    # Reset the cluster to a known state before the scenario
    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    cluster_util = ClusterKeywords()
    topology = cluster_util.get_cluster_topology(cluster_conf)

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    num_docs = 1000

    client = MobileRestClient()

    # "doc_pusher" writes all the docs; "a_user" only reads them via _changes
    doc_pusher_user_info = userinfo.UserInfo("doc_pusher", "pass", channels=["A"], roles=[])
    doc_pusher_auth = client.create_user(
        url=sg_admin_url,
        db=sg_db,
        name=doc_pusher_user_info.name,
        password=doc_pusher_user_info.password,
        channels=doc_pusher_user_info.channels)

    a_user_info = userinfo.UserInfo("a_user", "pass", channels=["A"], roles=[])
    client.create_user(url=sg_admin_url,
                       db=sg_db,
                       name=a_user_info.name,
                       password=a_user_info.password,
                       channels=a_user_info.channels)
    a_user_session = client.create_session(url=sg_admin_url,
                                           db=sg_db,
                                           name=a_user_info.name,
                                           password=a_user_info.password)

    # Shutdown all accel nodes in parallel while a bulk_docs load is in flight
    with concurrent.futures.ThreadPoolExecutor(max_workers=3) as ex:

        # Start adding docs
        docs_1 = document.create_docs(None, num_docs, channels=doc_pusher_user_info.channels)
        docs_1_task = ex.submit(client.add_bulk_docs, url=sg_url, db=sg_db, docs=docs_1, auth=doc_pusher_auth)

        # Take down all access nodes
        log_info("Shutting down sg_accels: [{}, {}, {}] ...".format(
            cluster.sg_accels[0], cluster.sg_accels[1], cluster.sg_accels[2]))
        sg_accel_down_task_1 = ex.submit(cluster.sg_accels[0].stop)
        sg_accel_down_task_2 = ex.submit(cluster.sg_accels[1].stop)
        sg_accel_down_task_3 = ex.submit(cluster.sg_accels[2].stop)
        # stop() returns 0 on success; .result() blocks until each shutdown completes
        assert sg_accel_down_task_1.result() == 0
        assert sg_accel_down_task_2.result() == 0
        assert sg_accel_down_task_3.result() == 0

        # Block until bulk_docs is complete
        doc_push_result_1 = docs_1_task.result()
        assert len(doc_push_result_1) == num_docs
        client.verify_docs_present(url=sg_url, db=sg_db, expected_docs=doc_push_result_1, auth=doc_pusher_auth)

        # Load sync_gateway with another batch of docs while the sg_accel nodes are offline
        docs_2_bodies = document.create_docs(None, num_docs, channels=doc_pusher_user_info.channels)
        docs_push_result_2 = client.add_bulk_docs(url=sg_url, db=sg_db, docs=docs_2_bodies, auth=doc_pusher_auth)
        assert len(docs_push_result_2) == num_docs
        client.verify_docs_present(url=sg_url, db=sg_db, expected_docs=docs_push_result_2, auth=doc_pusher_auth)

        # Start loading Sync Gateway with another set of docs while bringing the sg_accel nodes online
        docs_3 = document.create_docs(None, num_docs, channels=doc_pusher_user_info.channels)
        docs_3_task = ex.submit(client.add_bulk_docs, url=sg_url, db=sg_db, docs=docs_3, auth=doc_pusher_auth)

        # Bring all the sg_accel nodes back up, in parallel, while the load above is in flight
        log_info("Starting sg_accels: [{}, {}, {}] ...".format(
            cluster.sg_accels[0], cluster.sg_accels[1], cluster.sg_accels[2]))
        sg_accel_up_task_1 = ex.submit(cluster.sg_accels[0].start, sg_conf)
        sg_accel_up_task_2 = ex.submit(cluster.sg_accels[1].start, sg_conf)
        sg_accel_up_task_3 = ex.submit(cluster.sg_accels[2].start, sg_conf)
        # start() also returns 0 on success
        assert sg_accel_up_task_1.result() == 0
        assert sg_accel_up_task_2.result() == 0
        assert sg_accel_up_task_3.result() == 0

        # Wait for pindex to reshard correctly across the 3 restored accel nodes
        assert cluster.validate_cbgt_pindex_distribution_retry(3)

        # Block until second bulk_docs is complete
        doc_push_result_3 = docs_3_task.result()
        assert len(doc_push_result_3) == num_docs
        client.verify_docs_present(url=sg_url, db=sg_db, expected_docs=doc_push_result_3, auth=doc_pusher_auth)

        # Combine the 3 push results and make sure the changes propagate to a_user
        # a_user has access to the doc's channel.
        log_info("Verifying all the changes show up for 'a_user' ...")
        all_docs = doc_push_result_1 + docs_push_result_2 + doc_push_result_3
        client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=all_docs, auth=a_user_session, polling_interval=2)
def test_design_doc_update(setup_client_syncgateway_test):
    """
    Regression check: querying a view after its design doc is updated must
    use the NEW version of the view.

    Ref:
    https://github.com/couchbase/couchbase-lite-android/issues/1139
    https://github.com/couchbaselabs/mobile-testkit/issues/1155

    1. Add 10 docs to the client with content
    2. Add 5 docs to the client without content
    3. Create design doc version 1 to fetch doc._id, doc._rev for docs with content
    4. Run a query and check for 10 expected docs with design doc version 1
    5. Update design doc to version 2 to fetch doc._id, doc._rev for docs with no content
    6. Run a query and check for 5 expected docs with design doc version 2
    -> With CBL 1.4.0 or earlier, even though design doc is updated to version 2,
       the second query will use design doc 1 and return 10 docs instead of 5
    """
    log_info("Running 'test_design_doc_update'")
    ls_url = setup_client_syncgateway_test["ls_url"]
    log_info("ls_url: {}".format(ls_url))

    client = MobileRestClient()

    docs_with_content = 10
    docs_without_content = 5
    design_doc_name = "dd"

    ls_db = client.create_database(ls_url, name="ls_db")

    # Seed the db with docs that carry a 'content' property ...
    content_doc_bodies = create_docs("doc_content_", docs_with_content,
                                     content={"hi": "I should be in the view"})
    pushed_with_content = client.add_bulk_docs(url=ls_url, db=ls_db, docs=content_doc_bodies)
    assert len(pushed_with_content) == docs_with_content

    # ... and docs that do not
    no_content_doc_bodies = create_docs("doc_no_content_", docs_without_content)
    pushed_without_content = client.add_bulk_docs(url=ls_url, db=ls_db, docs=no_content_doc_bodies)
    assert len(pushed_without_content) == docs_without_content

    # Version 1 of the design doc: emit only docs that HAVE 'content'
    view = """{
    "language" : "javascript",
    "views" : {
        "content_view" : {
            "map" : "function(doc, meta) { if (doc.content) { emit(doc._id, doc._rev); } }"
        }
    }
}"""
    client.add_design_doc(url=ls_url, db=ls_db, name=design_doc_name, doc=view)

    # Query with v1 — expect all 10 content docs
    first_query = client.get_view(url=ls_url, db=ls_db,
                                  design_doc_name=design_doc_name, view_name="content_view")
    client.verify_view_row_num(view_response=first_query, expected_num_rows=10)

    # Version 2 flips the predicate: emit only docs WITHOUT 'content'
    view = """{
    "language" : "javascript",
    "views" : {
        "content_view" : {
            "map" : "function(doc, meta) { if (!(doc.content)) { emit(doc._id, doc._rev); } }"
        }
    }
}"""

    current_rev = client.get_design_doc_rev(url=ls_url, db=ls_db, name=design_doc_name)
    assert current_rev and current_rev.startswith("1-")
    log_info("dd_rev: {}".format(current_rev))

    client.update_design_doc(url=ls_url, db=ls_db, name=design_doc_name, doc=view, rev=current_rev)

    updated_rev = client.get_design_doc_rev(url=ls_url, db=ls_db, name=design_doc_name)
    assert updated_rev != current_rev and updated_rev.startswith("2-")
    log_info("dd_rev_new: {}".format(updated_rev))

    # Query with v2 — expect only the 5 no-content docs (fails on CBL <= 1.4.0)
    second_query = client.get_view(url=ls_url, db=ls_db,
                                   design_doc_name=design_doc_name, view_name="content_view")
    client.verify_view_row_num(view_response=second_query, expected_num_rows=5)
def test_no_conflicts_with_revs_limit(params_from_base_test_setup, sg_conf_name, num_of_docs, revs_limit):
    """ @summary Enable no conflicts with a non-default revs_limit and verify revs_limit is maintained
    Test case link : https://docs.google.com/spreadsheets/d/1YwI_gCeoBebQKBybkzoAEoXSc0XLReszDA-mPFQapgk/edit#gid=0
    covered #4, #5
    Steps:
    1. Enable allow_conflicts = false in SG config with parametrized revs_limit
    2. Add docs to SG.
    3. Update docs more than revs_limit.
    4. Create a conflict and verify it fails.
    5. Get number of revisions and verify length is equal to revs_limit set to
    6. Update the docs 1 more time
    7. Get number of revisions and verify number of revisions should be same as revs_limit
    8. Verify previous revisions does not exist
    """

    # Setup
    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    no_conflicts_enabled = params_from_base_test_setup["no_conflicts_enabled"]
    mode = params_from_base_test_setup["mode"]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"

    if not no_conflicts_enabled:
        pytest.skip('--no-conflicts is not enabled, so skipping the test')
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    c = cluster.Cluster(cluster_config)
    c.reset(sg_conf)

    sg_client = MobileRestClient()
    channels = ["no-conflicts"]
    sg_client.create_user(url=sg_admin_url, db=sg_db, name='autotest', password='******', channels=channels)
    autouser_session = sg_client.create_session(url=sg_admin_url, db=sg_db, name='autotest', password='******')
    # end of Set up

    # 1. Enable allow_conflicts = false in SG config with revs_limit, then
    # restart the gateway so the new config takes effect
    temp_cluster_config = copy_to_temp_conf(cluster_config, mode)
    persist_cluster_config_environment_prop(temp_cluster_config, 'revs_limit', revs_limit, property_name_check=False)
    status = c.sync_gateways[0].restart(config=sg_conf, cluster_config=temp_cluster_config)
    assert status == 0, "Syncgateway did not start after having revs_limit 1 with no conflicts mode"

    # 2. Add docs to SG.
    sgdoc_bodies = document.create_docs(doc_id_prefix='sg_docs', number=num_of_docs,
                                        attachments_generator=attachment.generate_2_png_10_10, channels=channels)
    sg_docs = sg_client.add_bulk_docs(url=sg_url, db=sg_db, docs=sgdoc_bodies, auth=autouser_session)
    # NOTE(review): this asserts on the request bodies, which is trivially true;
    # len(sg_docs) would actually verify the bulk add succeeded — confirm intent
    assert len(sgdoc_bodies) == num_of_docs

    # 3. Update the docs more times than revs_limit, recording the generation
    # suffix of each new revision so we can later check old revs were pruned
    prev_revs = []
    for i in xrange(revs_limit + 5):
        update_sg_docs = sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=1, delay=None,
                                               auth=autouser_session, channels=channels)
        rev = update_sg_docs[0]['rev'].split('-')[1]
        prev_revs.append(rev)

    # 4. Try to create a conflict — must be rejected with 409 in no-conflicts mode
    for doc in sg_docs:
        with pytest.raises(HTTPError) as he:
            sg_client.add_conflict(url=sg_url, db=sg_db, doc_id=doc["id"], parent_revisions=doc["rev"],
                                   new_revision="2-foo", auth=autouser_session)
        assert he.value.message.startswith('409 Client Error: Conflict for url:')

    # 5. Get number of revisions and verify length is equal to revs_limit set to
    for doc in sg_docs:
        num_of_revs = sg_client.get_revs_num_in_history(url=sg_url, db=sg_db, doc_id=doc["id"], auth=autouser_session)
        assert len(num_of_revs) == revs_limit, "Number of revisions in history is more than revs_limit set in sg config"
        # NOTE(review): only the first 4 pruned revs are checked here vs 5 in
        # step 8 below — presumably conservative before the extra update; confirm
        for i in xrange(4):
            assert prev_revs[i] not in num_of_revs

    # 6. Update the docs 1 more time
    sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=1, delay=None,
                          auth=autouser_session, channels=channels)

    # 7. Get number of revisions and verify number of revisions should be same revs_limit
    # 8. Verify previous revisions does not exist
    for doc in sg_docs:
        num_of_revs = sg_client.get_revs_num_in_history(url=sg_url, db=sg_db, doc_id=doc["id"], auth=autouser_session)
        assert len(num_of_revs) == revs_limit, "Number of revisions in history is more than revs_limit set in sg config"
        for i in xrange(5):
            assert prev_revs[i] not in num_of_revs
def test_migrate_conflicts_delete_last_rev(params_from_base_test_setup, sg_conf_name, num_of_docs):
    """ @summary Migrate conflicts to no-conflicts mode, delete the last revision
    and verify all revisions in history exist in the open revisions captured earlier.
    Test case link : https://docs.google.com/spreadsheets/d/1YwI_gCeoBebQKBybkzoAEoXSc0XLReszDA-mPFQapgk/edit#gid=0
    covered #19
    Steps:
    1. Start sg with default(i.e allow_conflicts=true)
    2. Add docs to SG.
    3. Update docs few times .
    4. Create a conflicts and verify it is successful.
    5. Modify sg config by enabling allow_conflicts to false
    6. restart sg.
    7. Delete doc by revision of current active open revision
    8. Verify all revisions in history exists in open revisions which got at step 4.
    """
    # Setup
    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    no_conflicts_enabled = params_from_base_test_setup["no_conflicts_enabled"]
    mode = params_from_base_test_setup["mode"]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sync_gateway_version = params_from_base_test_setup["sync_gateway_version"]
    sg_db = "db"

    if no_conflicts_enabled or sync_gateway_version < "2.0":
        pytest.skip('--no-conflicts is enabled and does not work with sg < 2.0 , so skipping the test')

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    # 1. Start sg with default(i.e allow_conflicts=true)
    c = cluster.Cluster(cluster_config)
    c.reset(sg_conf)

    sg_client = MobileRestClient()
    channels = ["no-conflicts"]
    sg_client.create_user(url=sg_admin_url, db=sg_db, name='autotest', password='******', channels=channels)
    autouser_session = sg_client.create_session(url=sg_admin_url, db=sg_db, name='autotest', password='******')
    # end of Set up

    # 2. Add docs to SG.
    sgdoc_bodies = document.create_docs(doc_id_prefix="sg_docs", number=num_of_docs, channels=channels)
    sg_docs = sg_client.add_bulk_docs(url=sg_url, db=sg_db, docs=sgdoc_bodies, auth=autouser_session)
    # Bug fix: assert on the push response, not the request bodies (which was
    # trivially true), so a partial bulk add fails the test
    assert len(sg_docs) == num_of_docs

    # 3. Update the docs a few times, remembering each revision's generation
    # suffix (not used later in this test, but kept for parity with siblings)
    prev_revs = []
    for i in xrange(5):
        update_sg_docs = sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=1, delay=None,
                                               auth=autouser_session, channels=channels)
        rev = update_sg_docs[0]['rev'].split('-')[1]
        prev_revs.append(rev)

    # 4. Create a conflicts and verify it is successful.
    for doc in sg_docs:
        conflicted_rev = sg_client.add_conflict(url=sg_url, db=sg_db, doc_id=doc["id"], parent_revisions=doc["rev"],
                                                new_revision="2-foo", auth=autouser_session)
        assert conflicted_rev["rev"] == "2-foo"

    # Capture the open revisions for EVERY doc, keyed by doc id.
    # Bug fix: the original overwrote a single variable each iteration, so the
    # final verification compared every doc's history against only the LAST
    # doc's open revisions.
    open_revs_by_doc_id = {}
    for doc in sg_docs:
        open_revs_by_doc_id[doc["id"]] = sg_client.get_open_revs_ids(url=sg_url, db=sg_db, doc_id=doc["id"],
                                                                     rev="2-foo", auth=autouser_session)
    time.sleep(5)

    # 5. Enable allow_conflicts = false in SG config and 6. restart sg
    revs_limit = 2
    temp_cluster_config = copy_to_temp_conf(cluster_config, mode)
    persist_cluster_config_environment_prop(temp_cluster_config, 'no_conflicts_enabled', "True", property_name_check=False)
    persist_cluster_config_environment_prop(temp_cluster_config, 'revs_limit', revs_limit, property_name_check=False)
    status = c.sync_gateways[0].restart(config=sg_conf, cluster_config=temp_cluster_config)
    assert status == 0, "Syncgateway did not start after no conflicts is enabled"

    # One more update after the restart so the winning branch advances
    sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=1, auth=autouser_session, channels=channels)

    # 7. Delete each doc by the revision of its current active open revision
    for doc in sg_docs:
        doc_body = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc["id"], auth=autouser_session)
        sg_client.delete_doc(url=sg_url, db=sg_db, doc_id=doc["id"], rev=doc_body["_rev"], auth=autouser_session)

    # 8. Verify all revisions in each doc's history exist in the open revisions
    # captured for THAT doc at step 4.
    for doc in sg_docs:
        num_of_revs_history = sg_client.get_revs_num_in_history(url=sg_url, db=sg_db, doc_id=doc["id"],
                                                                auth=autouser_session)
        for rev in num_of_revs_history:
            assert rev in open_revs_by_doc_id[doc["id"]], "Expected revision does not exist in revision history "
def test_concurrent_updates_no_conflicts(params_from_base_test_setup, sg_conf_name, num_of_docs, revs_limit):
    """@summary Test with concurrent updates with no conflicts enabled
    Test case link : https://docs.google.com/spreadsheets/d/1YwI_gCeoBebQKBybkzoAEoXSc0XLReszDA-mPFQapgk/edit#gid=0
    covered #15
    Steps:
    1. Start sg with some revs_limit specified
    2. Add docs to SG.
    3. Update docs few times via sg .
    4. Update docs few times via sdk concurrently with sg.
       -> There are chances of getting conflict errors on both; the error is handled appropriately
    5. update docs few number of times.
    6. Verify it can maintain default revisions.
    7. Verify previous revisions deleted and revisions maintained based on revs_limit
    """

    # Setup
    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    no_conflicts_enabled = params_from_base_test_setup["no_conflicts_enabled"]
    mode = params_from_base_test_setup["mode"]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    if revs_limit is None:
        # Parametrized revs_limit of None means "use the SG default" (1000)
        revs_limit = 1000
    additional_updates = revs_limit
    total_updates = revs_limit + additional_updates

    if not no_conflicts_enabled:
        pytest.skip('--no-conflicts is not enabled, so skipping the test')
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    # 1. Start sg
    c = cluster.Cluster(cluster_config)
    c.reset(sg_conf)
    sg_client = MobileRestClient()
    channels = ["no-conflicts"]
    sg_client.create_user(url=sg_admin_url, db=sg_db, name='autotest', password='******', channels=channels)
    autouser_session = sg_client.create_session(url=sg_admin_url, db=sg_db, name='autotest', password='******')
    # Restart the gateway with the parametrized revs_limit applied
    temp_cluster_config = copy_to_temp_conf(cluster_config, mode)
    persist_cluster_config_environment_prop(temp_cluster_config, 'revs_limit', revs_limit, property_name_check=False)
    status = c.sync_gateways[0].restart(config=sg_conf, cluster_config=temp_cluster_config)
    assert status == 0, "Syncgateway did not start after no conflicts is enabled"
    # end of Set up

    # 2. Add docs to SG.
    sgdoc_bodies = document.create_docs(doc_id_prefix='sg_docs', number=num_of_docs,
                                        attachments_generator=attachment.generate_2_png_10_10, channels=channels)
    sg_docs = sg_client.add_bulk_docs(url=sg_url, db=sg_db, docs=sgdoc_bodies, auth=autouser_session)
    # NOTE(review): asserts on the request bodies, which is trivially true;
    # len(sg_docs) would actually verify the bulk add succeeded — confirm intent
    assert len(sgdoc_bodies) == num_of_docs

    # Connect to server via SDK
    log_info('Connecting to bucket ...')
    bucket_name = 'data-bucket'
    cbs_url = topology['couchbase_servers'][0]
    cbs_ip = host_for_url(cbs_url)
    sdk_client = Bucket('couchbase://{}/{}'.format(cbs_ip, bucket_name), password='******', timeout=SDK_TIMEOUT)
    sg_doc_ids = [doc['id'] for doc in sg_docs]
    sdk_docs_resp = sdk_client.get_multi(sg_doc_ids)

    # Update the same documents concurrently from a sync gateway client and an sdk client;
    # both tasks are joined before continuing so later updates are not racing them
    with ThreadPoolExecutor(max_workers=9) as tpe:
        update_from_sdk_task = tpe.submit(sdk_bulk_update, sdk_client, sdk_docs_resp, 10)
        update_from_sg_task = tpe.submit(sg_doc_updates, sg_client, sg_url=sg_url, sg_db=sg_db, sg_docs=sg_docs,
                                         number_updates=10, auth=autouser_session, channels=channels)
        update_from_sg_task.result()
        update_from_sdk_task.result()

    # 3. Update the docs 'total_updates' more times, recording each new
    # revision's generation suffix so pruning can be verified below
    prev_revs = []
    for i in xrange(total_updates):
        update_sg_docs = sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=1, delay=None,
                                               auth=autouser_session, channels=channels)
        rev = update_sg_docs[0]['rev'].split('-')[1]
        prev_revs.append(rev)

    # 4. Verify it can maintain default revisions.
    # 5. Verify previous revisions deleted.
    for doc in sg_docs:
        num_of_revs = sg_client.get_revs_num_in_history(url=sg_url, db=sg_db, doc_id=doc["id"], auth=autouser_session)
        assert len(num_of_revs) == revs_limit, "Number of revisions in history is more than revs_limit set in sg config"
        # The oldest 'additional_updates' revisions must have been pruned
        for i in xrange(additional_updates):
            assert prev_revs[i] not in num_of_revs
def test_migrate_conflicts_to_noConflicts(params_from_base_test_setup, sg_conf_name, num_of_docs, revs_limit):
    """ @summary Migrating from no conflicts false to true
    Test case link : https://docs.google.com/spreadsheets/d/1YwI_gCeoBebQKBybkzoAEoXSc0XLReszDA-mPFQapgk/edit#gid=0
    covered #6, #7, #8
    Steps:
    1. Start sg with default(i.e allow_conflicts=true)
    2. Add docs to SG.
    3. Update docs few times .
    4. Create a conflicts and verify it is successful.
    5. Modify sg config by enabling allow_conflicts to false
    6. restart sg.
    7. Create a conflict and verify conflict throws 409.
    8. update docs few number of times.
    9. Verify it can maintain default revisions.
    10. Verify previous revisions deleted.
    """

    # Setup
    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    no_conflicts_enabled = params_from_base_test_setup["no_conflicts_enabled"]
    mode = params_from_base_test_setup["mode"]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sync_gateway_version = params_from_base_test_setup["sync_gateway_version"]
    sg_db = "db"
    if revs_limit is None:
        # Parametrized revs_limit of None means "use the SG default" (1000)
        revs_limit = 1000
    additional_updates = revs_limit

    if no_conflicts_enabled or sync_gateway_version < "2.0":
        pytest.skip('--no-conflicts is enabled and does not work with sg < 2.0 , so skipping the test')

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    # 1. Start sg with default(i.e allow_conflicts=true)
    c = cluster.Cluster(cluster_config)
    c.reset(sg_conf)

    sg_client = MobileRestClient()
    channels = ["no-conflicts"]
    sg_client.create_user(url=sg_admin_url, db=sg_db, name='autotest', password='******', channels=channels)
    autouser_session = sg_client.create_session(url=sg_admin_url, db=sg_db, name='autotest', password='******')
    # end of Set up

    # 2. Add docs to SG.
    sgdoc_bodies = document.create_docs(doc_id_prefix='sg_docs', number=num_of_docs,
                                        attachments_generator=attachment.generate_2_png_10_10, channels=channels)
    sg_docs = sg_client.add_bulk_docs(url=sg_url, db=sg_db, docs=sgdoc_bodies, auth=autouser_session)
    # NOTE(review): asserts on the request bodies, which is trivially true;
    # len(sg_docs) would actually verify the bulk add succeeded — confirm intent
    assert len(sgdoc_bodies) == num_of_docs

    # 3. Update the docs 'revs_limit' times, recording each new revision's
    # generation suffix so pruning can be verified in step 10
    prev_revs = []
    for i in xrange(revs_limit):
        update_sg_docs = sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=1, delay=None,
                                               auth=autouser_session, channels=channels)
        rev = update_sg_docs[0]['rev'].split('-')[1]
        prev_revs.append(rev)

    # 4. Create a conflicts and verify it is successful (conflicts still allowed).
    for doc in sg_docs:
        conflicted_rev = sg_client.add_conflict(url=sg_url, db=sg_db, doc_id=doc["id"], parent_revisions=doc["rev"],
                                                new_revision="2-foo", auth=autouser_session)
        assert conflicted_rev["rev"] == "2-foo"

    # 5. Enable allow_conflicts = false in SG config and 6. restart sg
    temp_cluster_config = copy_to_temp_conf(cluster_config, mode)
    persist_cluster_config_environment_prop(temp_cluster_config, 'no_conflicts_enabled', "True", property_name_check=False)
    persist_cluster_config_environment_prop(temp_cluster_config, 'revs_limit', revs_limit, property_name_check=False)
    status = c.sync_gateways[0].restart(config=sg_conf, cluster_config=temp_cluster_config)
    assert status == 0, "Syncgateway did not start after no conflicts is enabled"

    # One update after the restart so the winning branch advances
    sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=1, auth=autouser_session, channels=channels)

    # 7. Create a conflict and verify conflict throws 409.
    for doc in sg_docs:
        with pytest.raises(HTTPError) as he:
            sg_client.add_conflict(url=sg_url, db=sg_db, doc_id=doc["id"], parent_revisions=doc["rev"],
                                   new_revision="2-foo1", auth=autouser_session)
        assert he.value.message.startswith('409 Client Error: Conflict for url:')

    # 8. update docs few number of times.
    # NOTE(review): the return value is assigned but never used below
    update_sg_docs = sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=additional_updates,
                                           auth=autouser_session, channels=channels)

    # 9. Verify it can maintain default revisions.
    # 10. Verify previous revisions deleted.
    for doc in sg_docs:
        num_of_revs = sg_client.get_revs_num_in_history(url=sg_url, db=sg_db, doc_id=doc["id"], auth=autouser_session)
        assert len(num_of_revs) == revs_limit, "Number of revisions in history is more than revs_limit set in sg config"
        # All revisions recorded in step 3 must have been pruned by now
        for i in xrange(additional_updates):
            assert prev_revs[i] not in num_of_revs
def test_view_backfill_for_deletes(params_from_base_test_setup, sg_conf_name, validate_changes_before_restart):
    """
    Scenario:
    1. Write a bunch of docs
    2. Delete 1/2
    3. Restart Sync Gateway
    4. Issue _changes, assert view backfills docs and delete notifications
    """

    num_docs = 1000
    sg_db = 'db'

    cluster_conf = params_from_base_test_setup['cluster_config']
    cluster_topology = params_from_base_test_setup['cluster_topology']
    mode = params_from_base_test_setup['mode']

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    sg_admin_url = cluster_topology['sync_gateways'][0]['admin']
    sg_url = cluster_topology['sync_gateways'][0]['public']
    cbs_url = cluster_topology['couchbase_servers'][0]

    log_info('sg_conf: {}'.format(sg_conf))
    log_info('sg_admin_url: {}'.format(sg_admin_url))
    log_info('sg_url: {}'.format(sg_url))
    log_info('cbs_url: {}'.format(cbs_url))
    log_info('validate_changes_before_restart: {}'.format(validate_changes_before_restart))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # Create clients
    sg_client = MobileRestClient()

    # Create user / session
    seth_user_info = UserInfo(name='seth', password='******', channels=['NASA', 'NATGEO'], roles=[])
    sg_client.create_user(url=sg_admin_url, db=sg_db, name=seth_user_info.name,
                          password=seth_user_info.password, channels=seth_user_info.channels)
    seth_auth = sg_client.create_session(url=sg_admin_url, db=sg_db, name=seth_user_info.name,
                                         password=seth_user_info.password)

    # Add 'num_docs' to Sync Gateway
    doc_bodies = document.create_docs('test_doc', number=num_docs, channels=seth_user_info.channels)
    bulk_resp = sg_client.add_bulk_docs(url=sg_url, db=sg_db, docs=doc_bodies, auth=seth_auth)
    assert len(bulk_resp) == num_docs

    # Delete half of the docs randomly; each chosen doc is removed from
    # 'bulk_resp' so it cannot be picked (and deleted) twice
    deleted_docs = []
    for _ in range(num_docs / 2):  # integer division — this file targets Python 2 (uses xrange elsewhere)
        random_doc = random.choice(bulk_resp)
        deleted_doc = sg_client.delete_doc(url=sg_url, db=sg_db, doc_id=random_doc['id'],
                                           rev=random_doc['rev'], auth=seth_auth)
        deleted_docs.append(deleted_doc)
        bulk_resp.remove(random_doc)

    log_info('Number of docs deleted: {}'.format(len(deleted_docs)))
    all_docs = bulk_resp + deleted_docs
    log_info('Number of docs to look for in changes: {}'.format(len(all_docs)))

    # This test will check changes before and after SG restart if
    # validate_changes_before_restart == True
    # If it is not set to True, only build the changes after restart
    if validate_changes_before_restart:
        # Verify deletions and inital docs show up in changes feed
        sg_client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=all_docs, auth=seth_auth)
        changes = sg_client.get_changes(url=sg_url, db=sg_db, since=0, auth=seth_auth)
        # All docs should show up + _user doc
        assert len(changes['results']) == num_docs + 1

        deleted_doc_ids = [doc['id'] for doc in deleted_docs]
        assert len(deleted_doc_ids) == num_docs / 2
        deleted_docs_in_changes = [
            change['id'] for change in changes['results']
            if 'deleted' in change and change['deleted']
        ]
        assert len(deleted_docs_in_changes) == num_docs / 2

        # All deleted docs should show up in the changes feed exactly once
        # (removing each match guards against duplicate delete notifications)
        for doc_id in deleted_docs_in_changes:
            assert doc_id in deleted_doc_ids
            deleted_doc_ids.remove(doc_id)
        assert len(deleted_doc_ids) == 0

    # Restart Sync Gateway
    sg_controller = SyncGateway()
    sg_controller.stop_sync_gateways(url=sg_url, cluster_config=cluster_conf)
    sg_controller.start_sync_gateways(url=sg_url, cluster_config=cluster_conf, config=sg_conf)

    # Same validation as above, now exercising the view backfill after restart.
    # Verify deletions and inital docs show up in changes feed
    sg_client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=all_docs, auth=seth_auth)
    changes = sg_client.get_changes(url=sg_url, db=sg_db, since=0, auth=seth_auth)
    # All docs should show up + _user doc
    assert len(changes['results']) == num_docs + 1

    deleted_doc_ids = [doc['id'] for doc in deleted_docs]
    assert len(deleted_doc_ids) == num_docs / 2
    deleted_docs_in_changes = [
        change['id'] for change in changes['results']
        if 'deleted' in change and change['deleted']
    ]
    assert len(deleted_docs_in_changes) == num_docs / 2

    # All deleted docs should show up in the changes feed exactly once
    for doc_id in deleted_docs_in_changes:
        assert doc_id in deleted_doc_ids
        deleted_doc_ids.remove(doc_id)
    assert len(deleted_doc_ids) == 0
def test_webhooks_crud(params_from_base_test_setup, sg_conf_name, filtered):
    """ Tests for webhook notification on import

    xattr mode
    1. Start sync gateway with autoimport
    1. Write 'num_docs_per_client' docs via SDK
    1. Write 'num_docs_per_client' docs via SG
    1. Verify 'num_docs_per_client' * 2 webhook events (id, rev, body)
    1. Update SG docs once each via SDK
    1. Update SDK docs once each via SG
    1. Verify 'num_docs_per_client' * 2 webhook events (id, rev, body)
    1. Delete SG docs via SDK
    1. Delete SDK docs via SG
    1. Verify 'num_docs_per_client' * 2 webhook events (id, rev, body)
    to verify no dups, wait 10s after receiving expected webhooks

    docmeta mode
    1. Write 'num_docs_per_client' docs via SG
    1. Verify 'num_docs_per_client' webhook events (id, rev, body)
    1. Update SG docs once each via SG
    1. Verify 'num_docs_per_client' webhook events (id, rev, body)
    1. Delete SG docs via SG
    1. Verify 'num_docs_per_client' webhook events (id, rev, body)

    if filtered, the scenario will add a filtered property to every other doc.
    The webhook validation will only look for the filtered docs
    """

    xattrs_enabled = params_from_base_test_setup['xattrs_enabled']

    log_info('Webhooks filtered?: {}'.format(filtered))

    cluster_conf = params_from_base_test_setup['cluster_config']
    cluster_topology = params_from_base_test_setup['cluster_topology']
    mode = params_from_base_test_setup['mode']

    sg_admin_url = cluster_topology['sync_gateways'][0]['admin']
    sg_url = cluster_topology['sync_gateways'][0]['public']
    cbs_url = cluster_topology['couchbase_servers'][0]
    sg_db = 'db'
    bucket_name = 'data-bucket'
    num_docs_per_client = 100

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_conf)

    # Start webhook server on test runner
    webhook_server = WebServer()
    webhook_server.start()

    sg_client = MobileRestClient()
    cbs_ip = host_for_url(cbs_url)
    sdk_client = Bucket('couchbase://{}/{}'.format(cbs_ip, bucket_name), password='******')

    # Two identities writing to the same 'shared' channel: one via SG REST, one via SDK
    sg_info = UserInfo('sg_user', 'pass', channels=['shared'], roles=[])
    sdk_info = UserInfo('sdk_user', 'pass', channels=['shared'], roles=[])
    sg_client.create_user(url=sg_admin_url, db=sg_db, name=sg_info.name,
                          password=sg_info.password, channels=sg_info.channels)
    sg_auth = sg_client.create_session(url=sg_admin_url, db=sg_db, name=sg_info.name,
                                       password=sg_info.password)

    # Create sg docs
    doc_content = {'aphex': 'twin'}
    sg_docs = document.create_docs(doc_id_prefix='sg_user_doc', number=num_docs_per_client,
                                   content=doc_content, channels=sg_info.channels)

    # Add filtered property to every other doc
    count = 0
    for sg_doc in sg_docs:
        if count % 2 == 0:
            sg_doc['filtered'] = True
        count += 1

    sg_doc_ids = [doc['_id'] for doc in sg_docs]
    sg_filtered_doc_ids = [doc['_id'] for doc in sg_docs if 'filtered' in doc]
    assert len(sg_doc_ids) == num_docs_per_client
    assert len(sg_filtered_doc_ids) == num_docs_per_client / 2

    # Create sdk docs
    sdk_docs = {
        'sdk_user_doc_{}'.format(i): {
            'channels': sdk_info.channels,
            'content': doc_content
        }
        for i in range(num_docs_per_client)
    }

    # Add filtered property to every other doc
    count = 0
    for _, doc_val in sdk_docs.items():
        if count % 2 == 0:
            doc_val['filtered'] = True
        count += 1

    sdk_doc_ids = [doc for doc in sdk_docs]
    sdk_filtered_doc_ids = [k for k, v in sdk_docs.items() if 'filtered' in v]
    assert len(sdk_doc_ids) == num_docs_per_client
    assert len(sdk_filtered_doc_ids) == num_docs_per_client / 2

    all_docs = sg_doc_ids + sdk_doc_ids
    all_filtered_docs = sg_filtered_doc_ids + sdk_filtered_doc_ids
    assert len(all_docs) == num_docs_per_client * 2

    # If xattr mode, add sg + sdk docs
    # If non xattr mode, add sg docs
    add_docs(sg_client=sg_client, sg_url=sg_url, sg_db=sg_db, sg_docs=sg_docs, sg_auth=sg_auth,
             sdk_client=sdk_client, sdk_docs=sdk_docs, num_docs_per_client=num_docs_per_client,
             xattrs=xattrs_enabled)

    # Wait for added docs to trigger webhooks.
    # NOTE(review): the third positional arg (1 / 2 / 3 below) presumably is the
    # expected revision generation per doc — confirm against poll_for_webhook_data
    if xattrs_enabled and filtered:
        poll_for_webhook_data(webhook_server, all_filtered_docs, 1, doc_content)
    elif xattrs_enabled and not filtered:
        poll_for_webhook_data(webhook_server, all_docs, 1, doc_content)
    elif not xattrs_enabled and filtered:
        poll_for_webhook_data(webhook_server, sg_filtered_doc_ids, 1, doc_content)
    else:
        poll_for_webhook_data(webhook_server, sg_doc_ids, 1, doc_content)
    webhook_server.clear_data()

    # Update sdk docs from sg
    # If xattr mode, update sdk docs from sg, update sg docs from SDK
    # If non xattr mode, update sg docs from sg
    updated_doc_content = {'brian': 'eno'}
    update_docs(sg_client=sg_client, sg_url=sg_url, sg_db=sg_db, sg_doc_ids=sg_doc_ids, sg_auth=sg_auth,
                sdk_client=sdk_client, sdk_doc_ids=sdk_doc_ids, updated_doc_content=updated_doc_content,
                xattrs=xattrs_enabled)

    # Wait for updates to trigger webhooks
    if xattrs_enabled and filtered:
        poll_for_webhook_data(webhook_server, all_filtered_docs, 2, updated_doc_content)
    elif xattrs_enabled and not filtered:
        poll_for_webhook_data(webhook_server, all_docs, 2, updated_doc_content)
    elif not xattrs_enabled and filtered:
        poll_for_webhook_data(webhook_server, sg_filtered_doc_ids, 2, updated_doc_content)
    else:
        poll_for_webhook_data(webhook_server, sg_doc_ids, 2, updated_doc_content)
    webhook_server.clear_data()

    delete_docs(sg_client=sg_client, sg_url=sg_url, sg_db=sg_db, sg_doc_ids=sg_doc_ids, sg_auth=sg_auth,
                sdk_client=sdk_client, sdk_doc_ids=sdk_doc_ids, xattrs=xattrs_enabled)

    # Wait for deletes to trigger webhook events, filter includes all deleted docs
    if xattrs_enabled:
        poll_for_webhook_data(webhook_server, all_docs, 3, updated_doc_content, deleted=True)
    else:
        poll_for_webhook_data(webhook_server, sg_doc_ids, 3, updated_doc_content, deleted=True)
    webhook_server.clear_data()

    # Stop webhook server
    webhook_server.stop()
def test_setting_expiry_in_bulk_docs(params_from_base_test_setup, sg_conf_name):
    """Verify doc expiry set through _bulk_docs.

    1. PUT /db/_bulk_docs with 10 documents. Set the "_exp":3 on 5 of these documents
    2. Wait five seconds
    3. POST /db/_bulk_get for the 10 documents. Validate that only the 5
       non-expiring documents are returned, and that the expired docs are
       gone (or tombstoned, in xattr mode) on the server.
    """
    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup['xattrs_enabled']

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    helper = ClusterKeywords()
    topology = helper.get_cluster_topology(cluster_config)
    helper.reset_cluster(cluster_config=cluster_config, sync_gateway_config=sg_conf)

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]

    for message in ("Running 'test_setting_expiry_in_bulk_docs'",
                    "cbs_url: {}".format(cbs_url),
                    "sg_url: {}".format(sg_url),
                    "sg_url_admin: {}".format(sg_url_admin)):
        log_info(message)

    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC", "ABC"]
    bucket_name = "data-bucket"

    # Direct SDK connection to the backing bucket, used later to confirm
    # expired docs are really gone server-side.
    cbs_ip = host_for_url(cbs_url)
    sdk_client = Bucket('couchbase://{}/{}'.format(cbs_ip, bucket_name), password='******')

    client = MobileRestClient()
    client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name,
                       password=sg_user_password, channels=sg_user_channels)
    session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)

    # Five docs expiring in 3s followed by five expiring in 10s
    bulk_bodies = document.create_docs(doc_id_prefix="exp_3", number=5, expiry=3, channels=sg_user_channels)
    bulk_bodies += document.create_docs(doc_id_prefix="exp_10", number=5, expiry=10, channels=sg_user_channels)

    bulk_docs = client.add_bulk_docs(url=sg_url, db=sg_db, docs=bulk_bodies, auth=session)

    # Allow exp_3 docs to expire
    time.sleep(5)

    bulk_docs_ids = [doc["id"] for doc in bulk_docs]

    expected_ids = ["exp_10_0", "exp_10_1", "exp_10_2", "exp_10_3", "exp_10_4"]
    expected_missing_ids = ["exp_3_0", "exp_3_1", "exp_3_2", "exp_3_3", "exp_3_4"]

    # validate=False so the expired docs come back as errors instead of raising
    bulk_get_docs, errors = client.get_bulk_docs(url=sg_url, db=sg_db, doc_ids=bulk_docs_ids,
                                                 auth=session, validate=False)
    assert len(bulk_get_docs) == len(expected_ids)
    assert len(errors) == len(expected_missing_ids)

    bulk_get_doc_ids = [doc["_id"] for doc in bulk_get_docs]
    error_ids = [doc["id"] for doc in errors]
    assert bulk_get_doc_ids == expected_ids
    assert error_ids == expected_missing_ids

    client.verify_doc_ids_found_in_response(response=bulk_get_docs, expected_doc_ids=expected_ids)
    client.verify_doc_ids_not_found_in_response(response=errors, expected_missing_doc_ids=expected_missing_ids)

    # Each expired doc must also have been purged / tombstoned on the server
    for expired_doc in error_ids:
        verify_doc_deletion_on_server(doc_id=expired_doc, sdk_client=sdk_client, sg_client=client,
                                      sg_admin_url=sg_url_admin, sg_db=sg_db, xattrs_enabled=xattrs_enabled)
def test_take_all_sgaccels_down(params_from_base_test_setup, sg_conf):
    """ Scenario that takes all sync_gateway accel nodes offline during doc load.
    After bring the nodes back online during load, the reshard of the DCP feed is verified.
    The changes feed is verified that all docs show up.

    1. Start doc load (1000 doc)
    2. Take all sg_accel nodes down in parallel
    3. Verify node are down
    4. Wait for doc adds to complete, store "doc_push_result_1"
    5. Verify "doc_push_result_1" docs added
    6. Start doc load (1000 docs)
    7. Wait for 5. to complete, store "doc_push_result_2"
    8. Verify "doc_push_result_2" docs added
    9. Start another doc load (1000 docs)
    10. Bring up nodes in parallel
    11. poll on p-index reshard
    12. Wait for 9. to complete, store "doc_push_result_3"
    13. Verify "doc_push_result_3" docs added
    14. Verify "doc_push_result_1" + "doc_push_result_2" + "doc_push_result_3" show up in _changes feed
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_dcp_reshard_single_sg_accel_goes_down_and_up'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))

    # Reset the cluster to a clean state before loading docs
    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    cluster_util = ClusterKeywords()
    topology = cluster_util.get_cluster_topology(cluster_conf)

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    num_docs = 1000

    client = MobileRestClient()

    # 'doc_pusher' writes the docs; 'a_user' shares channel "A" so every
    # pushed doc should later appear on a_user's _changes feed.
    doc_pusher_user_info = userinfo.UserInfo("doc_pusher", "pass", channels=["A"], roles=[])
    doc_pusher_auth = client.create_user(
        url=sg_admin_url,
        db=sg_db,
        name=doc_pusher_user_info.name,
        password=doc_pusher_user_info.password,
        channels=doc_pusher_user_info.channels
    )

    a_user_info = userinfo.UserInfo("a_user", "pass", channels=["A"], roles=[])
    client.create_user(
        url=sg_admin_url,
        db=sg_db,
        name=a_user_info.name,
        password=a_user_info.password,
        channels=a_user_info.channels
    )
    a_user_session = client.create_session(
        url=sg_admin_url,
        db=sg_db,
        name=a_user_info.name,
        password=a_user_info.password
    )

    # Shutdown all accel nodes in parallel
    # NOTE: the doc load is submitted first so the bulk add races the accel
    # shutdown — this ordering is intentional and must not be changed.
    with concurrent.futures.ThreadPoolExecutor(max_workers=3) as ex:

        # Start adding docs (step 1) — runs concurrently with the shutdown below
        docs_1 = document.create_docs(None, num_docs, channels=doc_pusher_user_info.channels)
        docs_1_task = ex.submit(client.add_bulk_docs, url=sg_url, db=sg_db, docs=docs_1, auth=doc_pusher_auth)

        # Take down all access nodes (steps 2-3)
        log_info("Shutting down sg_accels: [{}, {}, {}] ...".format(
            cluster.sg_accels[0],
            cluster.sg_accels[1],
            cluster.sg_accels[2]
        ))
        sg_accel_down_task_1 = ex.submit(cluster.sg_accels[0].stop)
        sg_accel_down_task_2 = ex.submit(cluster.sg_accels[1].stop)
        sg_accel_down_task_3 = ex.submit(cluster.sg_accels[2].stop)
        # stop() returns 0 on success
        assert sg_accel_down_task_1.result() == 0
        assert sg_accel_down_task_2.result() == 0
        assert sg_accel_down_task_3.result() == 0

        # Block until bulk_docs is complete (steps 4-5)
        doc_push_result_1 = docs_1_task.result()
        assert len(doc_push_result_1) == num_docs
        client.verify_docs_present(url=sg_url, db=sg_db, expected_docs=doc_push_result_1, auth=doc_pusher_auth)

        # Load sync_gateway with another batch of docs while the sg_accel nodes
        # are offline (steps 6-8) — synchronous this time
        docs_2_bodies = document.create_docs(None, num_docs, channels=doc_pusher_user_info.channels)
        docs_push_result_2 = client.add_bulk_docs(url=sg_url, db=sg_db, docs=docs_2_bodies, auth=doc_pusher_auth)
        assert len(docs_push_result_2) == num_docs
        client.verify_docs_present(url=sg_url, db=sg_db, expected_docs=docs_push_result_2, auth=doc_pusher_auth)

        # Start loading Sync Gateway with another set of docs while bringing the
        # sg_accel nodes online (step 9)
        docs_3 = document.create_docs(None, num_docs, channels=doc_pusher_user_info.channels)
        docs_3_task = ex.submit(client.add_bulk_docs, url=sg_url, db=sg_db, docs=docs_3, auth=doc_pusher_auth)

        # Bring all the sg_accel nodes back up (step 10)
        log_info("Starting sg_accels: [{}, {}, {}] ...".format(
            cluster.sg_accels[0],
            cluster.sg_accels[1],
            cluster.sg_accels[2]
        ))
        sg_accel_up_task_1 = ex.submit(cluster.sg_accels[0].start, sg_conf)
        sg_accel_up_task_2 = ex.submit(cluster.sg_accels[1].start, sg_conf)
        sg_accel_up_task_3 = ex.submit(cluster.sg_accels[2].start, sg_conf)
        # start() returns 0 on success
        assert sg_accel_up_task_1.result() == 0
        assert sg_accel_up_task_2.result() == 0
        assert sg_accel_up_task_3.result() == 0

        # Wait for pindex to reshard correctly across the 3 accel nodes (step 11)
        assert cluster.validate_cbgt_pindex_distribution_retry(3)

        # Block until second bulk_docs is complete (steps 12-13)
        doc_push_result_3 = docs_3_task.result()
        assert len(doc_push_result_3) == num_docs
        client.verify_docs_present(url=sg_url, db=sg_db, expected_docs=doc_push_result_3, auth=doc_pusher_auth)

    # Combine the 3 push results and make sure the changes propagate to a_user (step 14)
    # a_user has access to the doc's channel.
    log_info("Verifying all the changes show up for 'a_user' ...")
    all_docs = doc_push_result_1 + docs_push_result_2 + doc_push_result_3
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=all_docs,
                                  auth=a_user_session, polling_interval=2)
def test_detect_stale_channel_index(params_from_base_test_setup, sg_conf):
    """Verify Sync Gateway refuses to start against a stale channel index.

    1. Bring up single Sync Gateway node, backed by Couchbase Server with 3 accels indexing
    2. Configure such that the primary bucket and the channel index bucket are different (which is the norm)
    3. Add 1000 documents
    4. Shutdown Sync Gateway
    5. Delete / create the primary bucket ('data-bucket'), but do not touch the channel index bucket
    6. Start Sync Gateway
    7. Assert that sync_gateway fails to start due to stale channel index
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_detect_stale_channel_index'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))

    # Start from a clean cluster
    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    keywords = ClusterKeywords()
    topology = keywords.get_cluster_topology(cluster_conf)
    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    cb_server_url = topology["couchbase_servers"][0]

    sg_db = "db"
    num_docs = 1000

    cb_server = couchbaseserver.CouchbaseServer(url=cb_server_url)
    client = MobileRestClient()

    # Create doc pusher user
    doc_pusher_user_info = userinfo.UserInfo(name="doc_pusher", password="******",
                                             channels=["NASA"], roles=[])
    doc_pusher_auth = client.create_user(url=sg_admin_url, db=sg_db,
                                         name=doc_pusher_user_info.name,
                                         password=doc_pusher_user_info.password,
                                         channels=doc_pusher_user_info.channels)

    # Push enough docs through Sync Gateway to populate the channel index
    docs = document.create_docs(None, number=num_docs, channels=doc_pusher_user_info.channels)
    pushed_docs = client.add_bulk_docs(url=sg_url, db=sg_db, docs=docs, auth=doc_pusher_auth)
    assert len(pushed_docs) == num_docs

    # Shut down sync_gateway
    sg_controller = SyncGateway()
    sg_controller.stop_sync_gateways(cluster_config=cluster_conf, url=sg_url)

    # Recreate the primary bucket only — the channel index bucket is left
    # untouched, so the index is now stale relative to the (empty) data bucket
    cb_server.delete_bucket(name="data-bucket")
    ram_per_bucket_mb = cb_server.get_ram_per_bucket(num_buckets=2)
    cb_server.create_bucket(name="data-bucket", ram_quota_mb=ram_per_bucket_mb)

    # Start sync_gateway and assert that a Provisioning error is raised due to detecting stale index
    with pytest.raises(exceptions.ProvisioningError):
        sg_controller.start_sync_gateways(cluster_config=cluster_conf, url=sg_url, config=sg_conf)

    # TODO: To make this check even more accurate, could
    # run remote ssh command "systemctl status sync_gateway.service" and look for
    # regex pattern: Main PID: 7185 (code=exited, status=2)

    # Recreate the index bucket as well; with both buckets fresh, startup
    # should now succeed
    cb_server.delete_bucket(name="index-bucket")
    cb_server.create_bucket(name="index-bucket", ram_quota_mb=ram_per_bucket_mb)
    sg_controller.start_sync_gateways(cluster_config=cluster_conf, url=sg_url, config=sg_conf)
def test_no_conflicts_update_revs_limit(params_from_base_test_setup, sg_conf_name, num_of_docs, revs_limit):
    """ @summary Enable no conflicts and with non default revs_limit and
    verify revs_limit is maintained
    Test case link : https://docs.google.com/spreadsheets/d/1YwI_gCeoBebQKBybkzoAEoXSc0XLReszDA-mPFQapgk/edit#gid=0
    covered #14
    Steps:
    1. Enable allow_conflicts = false in SG config with parametried revs_limit
    2. Add docs to SG.
    3. Update the more than revs_limit.
    4. Check the revision list for the doc.
    5. Modify the revs_limit to revs_limit - 3
    6. Update doc
    7. Verify the revision history shows only revs_limit - 3 revisions now
    8. Verify previous revisions deleted
    """
    # Setup
    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    no_conflicts_enabled = params_from_base_test_setup["no_conflicts_enabled"]
    mode = params_from_base_test_setup["mode"]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    # The limit the test lowers revs_limit to in step 5, and the number of
    # updates performed in step 3 (revs_limit + 5 guarantees pruning occurs)
    reduced_revs_limit = revs_limit - 3
    total_updates = revs_limit + 5

    if not no_conflicts_enabled:
        pytest.skip('--no-conflicts is not enabled, so skipping the test')

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    c = cluster.Cluster(cluster_config)
    c.reset(sg_conf)

    sg_client = MobileRestClient()
    channels = ["no-conflicts"]
    sg_client.create_user(url=sg_admin_url, db=sg_db, name='autotest', password='******', channels=channels)
    autouser_session = sg_client.create_session(url=sg_admin_url, db=sg_db, name='autotest', password='******')
    # end of Set up

    # 1. Enable allow_conflicts = false in SG config with the parametrized revs_limit
    temp_cluster_config = copy_to_temp_conf(cluster_config, mode)
    persist_cluster_config_environment_prop(temp_cluster_config, 'revs_limit', revs_limit, property_name_check=False)
    status = c.sync_gateways[0].restart(config=sg_conf, cluster_config=temp_cluster_config)
    # Fixed message: report the revs_limit actually configured, not a hard-coded value
    assert status == 0, "Syncgateway did not start after having revs_limit {} with no conflicts mode".format(revs_limit)

    # 2. Add docs to SG.
    sgdoc_bodies = document.create_docs(doc_id_prefix="sg_docs", number=num_of_docs, channels=channels)
    sg_docs = sg_client.add_bulk_docs(url=sg_url, db=sg_db, docs=sgdoc_bodies, auth=autouser_session)
    assert len(sgdoc_bodies) == num_of_docs

    # 3. Update the docs more times than revs_limit, recording the generation
    # portion of each new rev so pruning can be checked later
    prev_revs = []
    for i in xrange(total_updates):
        update_sg_docs = sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=1, delay=None,
                                               auth=autouser_session, channels=channels)
        rev = update_sg_docs[0]['rev'].split('-')[1]
        prev_revs.append(rev)

    # 4. History length must equal revs_limit; the earliest
    # (total_updates - revs_limit) revisions must have been pruned.
    # (Was a magic literal 5 == total_updates - revs_limit.)
    for doc in sg_docs:
        num_of_revs = sg_client.get_revs_num_in_history(url=sg_url, db=sg_db, doc_id=doc["id"], auth=autouser_session)
        assert len(num_of_revs) == revs_limit, "Number of revisions in history is more than revs_limit set in sg config"
        for i in xrange(total_updates - revs_limit):
            assert prev_revs[i] not in num_of_revs

    # 5. Lower revs_limit to reduced_revs_limit and restart
    temp_cluster_config = copy_to_temp_conf(cluster_config, mode)
    persist_cluster_config_environment_prop(temp_cluster_config, 'revs_limit', reduced_revs_limit, property_name_check=False)
    status = c.sync_gateways[0].restart(config=sg_conf, cluster_config=temp_cluster_config)
    assert status == 0, "Syncgateway did not start after having revs_limit {} with no conflicts mode".format(reduced_revs_limit)

    # 6. Update the docs a couple more times to trigger pruning at the new limit
    sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=2, delay=None,
                          auth=autouser_session, channels=channels)

    # 7. History length must now equal the reduced limit (was hard-coded to 2,
    # which is only correct for the revs_limit=5 parametrization), and all revs
    # recorded in step 3 beyond the retained window must be gone
    for doc in sg_docs:
        num_of_revs = sg_client.get_revs_num_in_history(url=sg_url, db=sg_db, doc_id=doc["id"], auth=autouser_session)
        assert len(num_of_revs) == reduced_revs_limit, "Number of revisions in history is more than revs_limit set in sg config"
        for i in xrange(total_updates - reduced_revs_limit):
            assert prev_revs[i] not in num_of_revs