def test_peer_2_peer_sanity_push_one_shot_continuous(setup_p2p_test):
    """
    1. Create ls_db1 database on LiteServ One
    2. Create ls_db2 database on LiteServ Two
    3. Add 10000 docs to LiteServ 1 ls_db1
    4. Create one shot push replication LiteServ 1 ls_db1 -> LiteServ 2 ls_db2
    5. Verify the one shot docs replicate to LiteServ 2 ls_db2
    6. Create continuous push replication LiteServ 1 ls_db1 -> LiteServ 2 ls_db2
    7. Add 10000 docs to LiteServ 1 ls_db1
    8. Verify all docs replicate to LiteServ 2 ls_db2
    9. Verify all docs show up in changes for LiteServ 2 ls_db2
    """

    ls_url_one = setup_p2p_test["ls_url_one"]
    ls_url_two = setup_p2p_test["ls_url_two"]

    num_docs_per_db = 10000

    log_info("ls_url_one: {}".format(ls_url_one))
    log_info("ls_url_two: {}".format(ls_url_two))

    client = MobileRestClient()

    log_info("Creating databases")
    ls_db1 = client.create_database(url=ls_url_one, name="ls_db1")
    ls_db2 = client.create_database(url=ls_url_two, name="ls_db2")

    bulk_docs = create_docs("test_ls_db1_oneshot", num_docs_per_db)
    ls_db1_docs_oneshot = client.add_bulk_docs(ls_url_one, ls_db1, bulk_docs)
    assert len(ls_db1_docs_oneshot) == num_docs_per_db

    # Setup one shot push replication from LiteServ 1 ls_db1 to LiteServ 2 ls_db2
    log_info("Setting up a one-shot push replication from ls_db1 to ls_db2")
    push_repl = client.start_replication(
        url=ls_url_one,
        continuous=False,
        from_db=ls_db1,
        to_url=ls_url_two, to_db=ls_db2,
    )
    log_info("Replication ID: {}".format(push_repl))

    client.verify_docs_present(url=ls_url_two, db=ls_db2, expected_docs=ls_db1_docs_oneshot)
    client.verify_docs_in_changes(url=ls_url_two, db=ls_db2, expected_docs=ls_db1_docs_oneshot)

    # Setup continuous push replication from LiteServ 1 ls_db1 to LiteServ 2 ls_db2
    log_info("Setting up a continuous push replication from ls_db1 to ls_db2")
    push_repl = client.start_replication(
        url=ls_url_one,
        continuous=True,
        from_db=ls_db1,
        to_url=ls_url_two, to_db=ls_db2,
    )
    log_info("Replication ID: {}".format(push_repl))

    ls_db1_docs = client.add_docs(url=ls_url_one, db=ls_db1, number=num_docs_per_db, id_prefix="test_ls_db1")
    assert len(ls_db1_docs) == num_docs_per_db

    client.verify_docs_present(url=ls_url_two, db=ls_db2, expected_docs=ls_db1_docs_oneshot + ls_db1_docs)
    client.verify_docs_in_changes(url=ls_url_two, db=ls_db2, expected_docs=ls_db1_docs_oneshot + ls_db1_docs)
def test_peer_2_peer_sanity_pull(setup_p2p_test, num_docs_per_db, seeded_db, attachments_generator):
    """
    1. Create ls_db1 database on LiteServ One
    2. Create ls_db2 database on LiteServ Two
    3. Optionally seed LiteServ 2 ls_db2 with num_docs_per_db docs
    4. Create continuous pull replication LiteServ 1 ls_db1 <- LiteServ 2 ls_db2
    5. Add num_docs_per_db docs to LiteServ 2 ls_db2
    6. Verify all docs replicate to LiteServ 1 ls_db1
    7. Verify all docs show up in changes for LiteServ 1 ls_db1
    """

    ls_url_one = setup_p2p_test["ls_url_one"]
    ls_url_two = setup_p2p_test["ls_url_two"]

    log_info("ls_url_one: {}".format(ls_url_one))
    log_info("ls_url_two: {}".format(ls_url_two))

    attachments = False
    if attachments_generator:
        log_info("Running test_peer_2_peer_sanity_pull with attachment {}".format(attachments_generator))
        attachments = True

    client = MobileRestClient()

    log_info("Creating databases")
    ls_db1 = client.create_database(url=ls_url_one, name="ls_db1")
    ls_db2 = client.create_database(url=ls_url_two, name="ls_db2")

    if seeded_db:
        bulk_docs = create_docs("test_ls_db2_seed", num_docs_per_db)
        ls_db2_docs_seed = client.add_bulk_docs(url=ls_url_two, db=ls_db2, docs=bulk_docs)
        assert len(ls_db2_docs_seed) == num_docs_per_db

    # Setup continuous pull replication from LiteServ 2 ls_db2 to LiteServ 1 ls_db1
    pull_repl = client.start_replication(
        url=ls_url_one,
        continuous=True,
        from_url=ls_url_two, from_db=ls_db2,
        to_db=ls_db1
    )

    client.wait_for_replication_status_idle(url=ls_url_one, replication_id=pull_repl)

    ls_db2_docs = client.add_docs(url=ls_url_two, db=ls_db2, number=num_docs_per_db, id_prefix="test_ls_db2",
                                  attachments_generator=attachments_generator)
    assert len(ls_db2_docs) == num_docs_per_db

    client.verify_docs_present(url=ls_url_one, db=ls_db1, expected_docs=ls_db2_docs, attachments=attachments)
    client.verify_docs_in_changes(url=ls_url_one, db=ls_db1, expected_docs=ls_db2_docs)

    total_ls_db2_docs = ls_db2_docs
    if seeded_db:
        total_ls_db2_docs += ls_db2_docs_seed

    client.verify_docs_present(url=ls_url_one, db=ls_db1, expected_docs=total_ls_db2_docs, attachments=attachments)
    client.verify_docs_in_changes(url=ls_url_one, db=ls_db1, expected_docs=total_ls_db2_docs)
def test_peer_2_peer_sanity_pull(setup_p2p_test):
    """
    1. Create ls_db1 database on LiteServ One
    2. Create ls_db2 database on LiteServ Two
    3. Create continuous pull replication LiteServ 1 ls_db1 <- LiteServ 2 ls_db2
    4. Add 5000 docs to LiteServ 2 ls_db2
    5. Verify all docs replicate to LiteServ 1 ls_db1
    6. Verify all docs show up in changes for LiteServ 1 ls_db1
    """

    ls_url_one = setup_p2p_test["ls_url_one"]
    ls_url_two = setup_p2p_test["ls_url_two"]

    num_docs_per_db = 5000

    log_info("ls_url_one: {}".format(ls_url_one))
    log_info("ls_url_two: {}".format(ls_url_two))

    client = MobileRestClient()

    log_info("Creating databases")
    ls_db1 = client.create_database(url=ls_url_one, name="ls_db1")
    ls_db2 = client.create_database(url=ls_url_two, name="ls_db2")

    # Setup continuous pull replication from LiteServ 2 ls_db2 to LiteServ 1 ls_db1
    pull_repl = client.start_replication(
        url=ls_url_one,
        continuous=True,
        from_url=ls_url_two, from_db=ls_db2,
        to_db=ls_db1
    )

    client.wait_for_replication_status_idle(url=ls_url_one, replication_id=pull_repl)

    ls_db2_docs = client.add_docs(url=ls_url_two, db=ls_db2, number=num_docs_per_db, id_prefix="test_ls_db2")
    assert len(ls_db2_docs) == num_docs_per_db

    client.verify_docs_present(url=ls_url_one, db=ls_db1, expected_docs=ls_db2_docs)
    client.verify_docs_in_changes(url=ls_url_one, db=ls_db1, expected_docs=ls_db2_docs)
def test_peer_2_peer_sanity_pull_one_shot(setup_p2p_test):
    """
    1. Create ls_db1 database on LiteServ One
    2. Create ls_db2 database on LiteServ Two
    3. Add 10000 docs to LiteServ 2 ls_db2
    4. Create one shot pull replication LiteServ 1 ls_db1 <- LiteServ 2 ls_db2
    5. Verify all docs replicate to LiteServ 1 ls_db1
    6. Verify all docs show up in changes for LiteServ 1 ls_db1
    """

    ls_url_one = setup_p2p_test["ls_url_one"]
    ls_url_two = setup_p2p_test["ls_url_two"]

    num_docs_per_db = 10000

    log_info("ls_url_one: {}".format(ls_url_one))
    log_info("ls_url_two: {}".format(ls_url_two))

    client = MobileRestClient()

    log_info("Creating databases")
    ls_db1 = client.create_database(url=ls_url_one, name="ls_db1")
    ls_db2 = client.create_database(url=ls_url_two, name="ls_db2")

    bulk_docs = create_docs("test_ls_db2", num_docs_per_db)
    ls_db2_docs = client.add_bulk_docs(url=ls_url_two, db=ls_db2, docs=bulk_docs)
    assert len(ls_db2_docs) == num_docs_per_db

    # Setup one shot pull replication from LiteServ 2 ls_db2 to LiteServ 1 ls_db1
    log_info("Setting up a one-shot pull replication from ls_db2 to ls_db1")
    pull_repl = client.start_replication(
        url=ls_url_one,
        continuous=False,
        from_url=ls_url_two, from_db=ls_db2,
        to_db=ls_db1
    )
    log_info("Replication ID: {}".format(pull_repl))

    client.verify_docs_present(url=ls_url_one, db=ls_db1, expected_docs=ls_db2_docs)
    client.verify_docs_in_changes(url=ls_url_one, db=ls_db1, expected_docs=ls_db2_docs)
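
# The verify_docs_in_changes() calls used throughout these tests poll the CouchDB-style
# /{db}/_changes REST endpoint that both LiteServ and Sync Gateway expose, until every
# expected (id, rev) pair has been observed. Below is a minimal sketch of that polling
# loop, assuming only `requests`; the helper name and retry policy are illustrative,
# not the actual MobileRestClient implementation.
import time

import requests


def wait_for_docs_in_changes(url, db, expected_docs, timeout=60, poll_interval=1):
    """Poll GET {url}/{db}/_changes until all expected (id, rev) pairs appear."""
    expected = {(doc["id"], doc["rev"]) for doc in expected_docs}
    start = time.time()
    while time.time() - start < timeout:
        resp = requests.get("{}/{}/_changes".format(url, db),
                            params={"feed": "normal", "since": 0})
        resp.raise_for_status()
        seen = {
            (change["id"], change["changes"][0]["rev"])
            for change in resp.json()["results"]
            if change.get("changes")
        }
        if expected.issubset(seen):
            return
        time.sleep(poll_interval)
    raise AssertionError("Timed out waiting for docs to appear in _changes")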
def test_rollback_server_reset(params_from_base_test_setup, sg_conf_name):
    """
    Test for sync gateway resiliency under Couchbase Server rollback

    Scenario
    1. Create user (seth:pass) and session
    2. Add docs targeting all vbuckets except 66
    3. Add docs to vbucket 66
    4. Verify the docs show up in seth's changes feed
    5. Delete vBucket 66 file on server
    6. Restart server
    7. User should only see docs not in vbucket 66
    """

    num_vbuckets = 1024

    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    cb_server_url = topology["couchbase_servers"][0]
    cb_server = couchbaseserver.CouchbaseServer(cb_server_url)
    sg_db = "db"

    if mode == "cc":
        pytest.skip("Rollback not supported in channel cache mode")

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster = Cluster(cluster_config)
    cluster.reset(sg_conf)

    client = MobileRestClient()

    seth_user_info = userinfo.UserInfo("seth", "pass", channels=["NASA"], roles=[])
    client.create_user(url=sg_admin_url, db=sg_db, name=seth_user_info.name,
                       password=seth_user_info.password, channels=seth_user_info.channels)
    seth_session = client.create_session(url=sg_admin_url, db=sg_db,
                                         name=seth_user_info.name, password=seth_user_info.password)

    # Create a doc that will hash to each vbucket except vbucket 66 (ids generated in parallel)
    doc_id_for_every_vbucket_except_66 = []
    with concurrent.futures.ProcessPoolExecutor() as pex:
        futures = [
            pex.submit(document.generate_doc_id_for_vbucket, i)
            for i in range(num_vbuckets) if i != 66
        ]
        for future in concurrent.futures.as_completed(futures):
            doc_id = future.result()
            doc = document.create_doc(doc_id=doc_id, channels=seth_user_info.channels)
            doc_id_for_every_vbucket_except_66.append(doc)

    vbucket_66_docs = []
    for _ in range(5):
        vbucket_66_docs.append(
            document.create_doc(
                doc_id=document.generate_doc_id_for_vbucket(66),
                channels=seth_user_info.channels))

    seth_docs = client.add_bulk_docs(url=sg_url, db=sg_db,
                                     docs=doc_id_for_every_vbucket_except_66, auth=seth_session)
    seth_66_docs = client.add_bulk_docs(url=sg_url, db=sg_db, docs=vbucket_66_docs, auth=seth_session)
    assert len(seth_docs) == num_vbuckets - 1
    assert len(seth_66_docs) == 5

    # Verify that all docs show up in seth's changes feed
    all_docs = seth_docs + seth_66_docs
    assert len(all_docs) == (num_vbuckets - 1) + 5
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=all_docs, auth=seth_session)

    # Delete vbucket and restart server
    cb_server.delete_vbucket(66, "data-bucket")
    cb_server.restart()

    max_retries = 50
    count = 0
    while count != max_retries:
        # Try to get changes; sync gateway should be able to recover and return changes.
        # A changes request with since=0 should now be in a rolled back state due to the
        # data loss from the removed vbucket. Seth should only see the docs not present
        # in vbucket 66, unlike the full set above.
        try:
            changes = client.get_changes(url=sg_url, db=sg_db, since=0, auth=seth_session)
            changes_ids = [
                change["id"] for change in changes["results"]
                if not change["id"].startswith("_user")
            ]
            log_info("Number of changes ids: {}, num vbuckets: {}".format(
                len(changes_ids), num_vbuckets))
            if len(changes_ids) == (num_vbuckets - 1):
                break
        except HTTPError as he:
            if he.response.status_code == 503:
                log_info('server is still down')
            else:
                raise
        time.sleep(1)
        count += 1

    # Verify that seth's vbucket 66 docs do not appear in changes, since vbucket 66 got deleted
    vbucket_66_docids = [doc1["id"] for doc1 in seth_66_docs]
    for doc_66 in vbucket_66_docids:
        assert doc_66 not in changes_ids, "doc {} in vbucket 66 shows up in changes".format(doc_66)
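
# document.generate_doc_id_for_vbucket() above depends on Couchbase Server's key-to-vbucket
# mapping, which is commonly described as taking a CRC32 of the key, keeping bits 16-30,
# and reducing modulo the number of vbuckets (1024). A brute-force sketch of such a helper
# is below; zlib.crc32 and the exact bit manipulation are an assumption about the server's
# hash variant, so treat this as illustrative only.
import uuid
import zlib


def doc_id_for_vbucket(target_vbucket, num_vbuckets=1024):
    """Generate random doc ids until one hashes to the target vbucket."""
    while True:
        candidate = uuid.uuid4().hex
        crc = zlib.crc32(candidate.encode()) & 0xffffffff
        vbucket = ((crc >> 16) & 0x7fff) % num_vbuckets
        if vbucket == target_vbucket:
            return candidate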
def test_take_all_sgaccels_down(params_from_base_test_setup, sg_conf):
    """
    Scenario that takes all sync_gateway accel nodes offline during doc load.
    After bringing the nodes back online during load, the reshard of the DCP feed is verified.
    The changes feed is verified that all docs show up.

    1. Start doc load (1000 docs)
    2. Take all sg_accel nodes down in parallel
    3. Verify nodes are down
    4. Wait for doc adds to complete, store "doc_push_result_1"
    5. Verify "doc_push_result_1" docs added
    6. Start doc load (1000 docs)
    7. Wait for 6. to complete, store "doc_push_result_2"
    8. Verify "doc_push_result_2" docs added
    9. Start another doc load (1000 docs)
    10. Bring up nodes in parallel
    11. poll on p-index reshard
    12. Wait for 9. to complete, store "doc_push_result_3"
    13. Verify "doc_push_result_3" docs added
    14. Verify "doc_push_result_1" + "doc_push_result_2" + "doc_push_result_3" show up in _changes feed
    """

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_take_all_sgaccels_down'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    cluster_util = ClusterKeywords()
    topology = cluster_util.get_cluster_topology(cluster_conf)

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    num_docs = 1000

    client = MobileRestClient()

    doc_pusher_user_info = userinfo.UserInfo("doc_pusher", "pass", channels=["A"], roles=[])
    doc_pusher_auth = client.create_user(
        url=sg_admin_url,
        db=sg_db,
        name=doc_pusher_user_info.name,
        password=doc_pusher_user_info.password,
        channels=doc_pusher_user_info.channels
    )

    a_user_info = userinfo.UserInfo("a_user", "pass", channels=["A"], roles=[])
    client.create_user(
        url=sg_admin_url,
        db=sg_db,
        name=a_user_info.name,
        password=a_user_info.password,
        channels=a_user_info.channels
    )
    a_user_session = client.create_session(
        url=sg_admin_url,
        db=sg_db,
        name=a_user_info.name,
        password=a_user_info.password
    )

    # Shutdown all accel nodes in parallel
    with concurrent.futures.ThreadPoolExecutor(max_workers=3) as ex:

        # Start adding docs
        docs_1 = document.create_docs(None, num_docs, channels=doc_pusher_user_info.channels)
        docs_1_task = ex.submit(client.add_bulk_docs, url=sg_url, db=sg_db, docs=docs_1, auth=doc_pusher_auth)

        # Take down all access nodes
        log_info("Shutting down sg_accels: [{}, {}, {}] ...".format(
            cluster.sg_accels[0],
            cluster.sg_accels[1],
            cluster.sg_accels[2]
        ))
        sg_accel_down_task_1 = ex.submit(cluster.sg_accels[0].stop)
        sg_accel_down_task_2 = ex.submit(cluster.sg_accels[1].stop)
        sg_accel_down_task_3 = ex.submit(cluster.sg_accels[2].stop)
        assert sg_accel_down_task_1.result() == 0
        assert sg_accel_down_task_2.result() == 0
        assert sg_accel_down_task_3.result() == 0

        # Block until the first bulk_docs is complete
        doc_push_result_1 = docs_1_task.result()
        assert len(doc_push_result_1) == num_docs
        client.verify_docs_present(url=sg_url, db=sg_db, expected_docs=doc_push_result_1, auth=doc_pusher_auth)

        # Load sync_gateway with another batch of docs while the sg_accel nodes are offline
        docs_2_bodies = document.create_docs(None, num_docs, channels=doc_pusher_user_info.channels)
        docs_push_result_2 = client.add_bulk_docs(url=sg_url, db=sg_db, docs=docs_2_bodies, auth=doc_pusher_auth)
        assert len(docs_push_result_2) == num_docs
        client.verify_docs_present(url=sg_url, db=sg_db, expected_docs=docs_push_result_2, auth=doc_pusher_auth)

        # Start loading Sync Gateway with another set of docs while bringing the sg_accel nodes online
        docs_3 = document.create_docs(None, num_docs, channels=doc_pusher_user_info.channels)
        docs_3_task = ex.submit(client.add_bulk_docs, url=sg_url, db=sg_db, docs=docs_3, auth=doc_pusher_auth)

        # Bring all the sg_accel nodes back up
        log_info("Starting sg_accels: [{}, {}, {}] ...".format(
            cluster.sg_accels[0],
            cluster.sg_accels[1],
            cluster.sg_accels[2]
        ))
        sg_accel_up_task_1 = ex.submit(cluster.sg_accels[0].start, sg_conf)
        sg_accel_up_task_2 = ex.submit(cluster.sg_accels[1].start, sg_conf)
        sg_accel_up_task_3 = ex.submit(cluster.sg_accels[2].start, sg_conf)
        assert sg_accel_up_task_1.result() == 0
        assert sg_accel_up_task_2.result() == 0
        assert sg_accel_up_task_3.result() == 0

        # Wait for pindex to reshard correctly
        assert cluster.validate_cbgt_pindex_distribution_retry(3)

        # Block until the third bulk_docs is complete
        doc_push_result_3 = docs_3_task.result()
        assert len(doc_push_result_3) == num_docs
        client.verify_docs_present(url=sg_url, db=sg_db, expected_docs=doc_push_result_3, auth=doc_pusher_auth)

    # Combine the 3 push results and make sure the changes propagate to a_user.
    # a_user has access to the doc's channel.
    log_info("Verifying all the changes show up for 'a_user' ...")
    all_docs = doc_push_result_1 + docs_push_result_2 + doc_push_result_3
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=all_docs,
                                  auth=a_user_session, polling_interval=2)
def test_replication_with_multiple_client_dbs_and_single_sync_gateway_db(setup_client_syncgateway_test):
    """Test replication from multiple client dbs to one sync_gateway db"""

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_url = setup_client_syncgateway_test["sg_url"]
    sg_admin_url = setup_client_syncgateway_test["sg_admin_url"]
    num_docs = 1000

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_url,
        config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_replication_with_multiple_client_dbs_and_single_sync_gateway_db'")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_admin_url: {}".format(sg_admin_url))
    log_info("sg_url: {}".format(sg_url))

    client = MobileRestClient()

    ls_db1 = client.create_database(url=ls_url, name="ls_db1")
    ls_db2 = client.create_database(url=ls_url, name="ls_db2")
    sg_db = client.create_database(url=sg_admin_url, name="sg_db", server="walrus:")

    # Setup continuous push / pull replication from ls_db1 to sg_db
    client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db1,
        to_url=sg_admin_url, to_db=sg_db
    )
    client.start_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_admin_url, from_db=sg_db,
        to_db=ls_db1
    )

    # Setup continuous push / pull replication from ls_db2 to sg_db
    client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db2,
        to_url=sg_admin_url, to_db=sg_db
    )
    client.start_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_admin_url, from_db=sg_db,
        to_db=ls_db2
    )

    ls_db_one_docs = client.add_docs(url=ls_url, db=ls_db1, number=num_docs, id_prefix=ls_db1)
    assert len(ls_db_one_docs) == num_docs

    ls_db_two_docs = client.add_docs(url=ls_url, db=ls_db2, number=num_docs, id_prefix=ls_db2)
    assert len(ls_db_two_docs) == num_docs

    ls_db1_db2_docs = ls_db_one_docs + ls_db_two_docs

    client.verify_docs_present(url=ls_url, db=ls_db1, expected_docs=ls_db1_db2_docs)
    client.verify_docs_present(url=ls_url, db=ls_db2, expected_docs=ls_db1_db2_docs)
    client.verify_docs_present(url=sg_admin_url, db=sg_db, expected_docs=ls_db1_db2_docs)

    client.verify_docs_in_changes(url=sg_admin_url, db=sg_db, expected_docs=ls_db1_db2_docs)
    client.verify_docs_in_changes(url=ls_url, db=ls_db1, expected_docs=ls_db1_db2_docs)
    client.verify_docs_in_changes(url=ls_url, db=ls_db2, expected_docs=ls_db1_db2_docs)
def test_initial_push_replication(setup_client_syncgateway_test, continuous):
    """
    1. Prepare LiteServ to have 10000 documents.
    2. Create a one-shot or continuous push replication to push the docs into a sync_gateway database.
    3. Verify that all of the docs get pushed.
    """

    sg_db = "db"
    ls_db = "ls_db"
    seth_channels = ["ABC", "NBC"]
    num_docs = 10000

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_one_admin = setup_client_syncgateway_test["sg_admin_url"]
    sg_one_public = setup_client_syncgateway_test["sg_url"]

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_one_public,
        config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_initial_push_replication', continuous: {}".format(continuous))
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_one_admin: {}".format(sg_one_admin))
    log_info("sg_one_public: {}".format(sg_one_public))

    client = MobileRestClient()
    client.create_user(sg_one_admin, sg_db, "seth", password="******", channels=seth_channels)
    session = client.create_session(sg_one_admin, sg_db, "seth")

    client.create_database(url=ls_url, name=ls_db)

    # Create 'num_docs' docs on LiteServ
    docs = client.add_docs(
        url=ls_url,
        db=ls_db,
        number=num_docs,
        id_prefix="seeded_doc",
        generator="four_k",
        channels=seth_channels
    )
    assert len(docs) == num_docs

    # Start push replication
    repl_id = client.start_replication(
        url=ls_url,
        continuous=continuous,
        from_db=ls_db,
        to_url=sg_one_admin, to_db=sg_db
    )

    if continuous:
        log_info("Waiting for replication status 'Idle' for: {}".format(repl_id))
        client.wait_for_replication_status_idle(ls_url, repl_id)
    else:
        log_info("Waiting for no replications: {}".format(repl_id))
        client.wait_for_no_replications(ls_url)

    # Verify docs replicated to sync_gateway
    client.verify_docs_present(url=sg_one_public, db=sg_db, expected_docs=docs, auth=session)

    # Verify docs show up in sync_gateway's changes feed
    client.verify_docs_in_changes(url=sg_one_public, db=sg_db, expected_docs=docs, auth=session)

    replications = client.get_replications(url=ls_url)

    if continuous:
        assert len(replications) == 1, "There should only be one replication running"
        assert replications[0]["status"] == "Idle", "Replication Status should be 'Idle'"
        assert replications[0]["continuous"], "Running replication should be continuous"
        # Only .NET has an 'error' property
        if "error" in replications[0]:
            assert len(replications[0]["error"]) == 0
    else:
        assert len(replications) == 0, "No replications should be running"
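
# start_replication() above wraps the CouchDB-style POST /_replicate endpoint exposed by
# the Couchbase Lite Listener. A minimal sketch of a push replication request is below,
# assuming `requests`; the payload shape follows the public _replicate API, and the helper
# name is illustrative. Re-posting the same body with "cancel": true stops a continuous
# replication.
import requests


def start_push_replication(ls_url, from_db, to_url, to_db, continuous=False):
    """Kick off a one-shot or continuous push replication on a Listener."""
    body = {
        "source": from_db,
        "target": "{}/{}".format(to_url, to_db),
        "continuous": continuous,
    }
    resp = requests.post("{}/_replicate".format(ls_url), json=body)
    resp.raise_for_status()
    # Continuous replications return a session_id that can be used to find the
    # replication in /_active_tasks or to cancel it later
    return resp.json().get("session_id")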
def test_backfill_channels_oneshot_changes(params_from_base_test_setup, sg_conf_name, grant_type):

    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"

    log_info("grant_type: {}".format(grant_type))

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster = Cluster(cluster_config)
    cluster.reset(sg_conf)

    client = MobileRestClient()

    admin_user_info = userinfo.UserInfo("admin", "pass", channels=["A"], roles=[])
    user_b_user_info = userinfo.UserInfo("USER_B", "pass", channels=["B"], roles=[])

    # Create users / sessions
    client.create_user(url=sg_admin_url, db=sg_db,
                       name=admin_user_info.name, password=admin_user_info.password,
                       channels=admin_user_info.channels)
    client.create_user(url=sg_admin_url, db=sg_db,
                       name=user_b_user_info.name, password=user_b_user_info.password,
                       channels=user_b_user_info.channels)

    admin_session = client.create_session(url=sg_admin_url, db=sg_db,
                                          name=admin_user_info.name, password=admin_user_info.password)
    user_b_session = client.create_session(url=sg_admin_url, db=sg_db,
                                           name=user_b_user_info.name, password=user_b_user_info.password)

    # Create 50 "A" channel docs
    a_docs = client.add_docs(url=sg_url, db=sg_db, number=50, id_prefix=None, auth=admin_session, channels=["A"])
    assert len(a_docs) == 50

    b_docs = client.add_docs(url=sg_url, db=sg_db, number=1, id_prefix="b_doc", auth=user_b_session, channels=["B"])
    assert len(b_docs) == 1

    user_doc = {"id": "_user/USER_B", "rev": None}
    b_docs.append(user_doc)

    # Loop until user_b sees b_doc_0 doc and _user/USER_B doc
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=b_docs, auth=user_b_session, strict=True)

    # Get last_seq for user_b
    user_b_changes = client.get_changes(url=sg_url, db=sg_db, since=0, auth=user_b_session, feed="normal")

    # Grant access to channel "A"
    if grant_type == "CHANNEL-REST":
        log_info("Granting user access to channel A via Admin REST user update")
        # Grant via update to user in Admin API
        client.update_user(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, channels=["A", "B"])

    elif grant_type == "CHANNEL-SYNC":
        log_info("Granting user access to channel A via sync function access()")
        # Grant via access() in the sync function; the doc id 'channel_access'
        # will trigger an access(doc.users, doc.channels) call
        access_doc = document.create_doc("channel_access", channels=["A"])
        access_doc["users"] = ["USER_B"]
        client.add_doc(url=sg_url, db=sg_db, doc=access_doc, auth=admin_session)

    elif grant_type == "ROLE-REST":
        log_info("Granting user access to channel A via Admin REST role grant")
        # Create role with channel A
        client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"])
        client.update_user(url=sg_admin_url, db=sg_db, name="USER_B", roles=["channel-A-role"])

    elif grant_type == "ROLE-SYNC":
        log_info("Granting user access to channel A via sync function role() grant")
        # Create role with channel A
        client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"])
        # Grant via role() in the sync function; the doc id 'role_access'
        # will trigger a role(doc.users, doc.roles) call
        role_access_doc = document.create_doc("role_access")
        role_access_doc["users"] = ["USER_B"]
        role_access_doc["roles"] = ["role:channel-A-role"]
        client.add_doc(sg_url, db=sg_db, doc=role_access_doc, auth=admin_session)

    else:
        pytest.fail("Unsupported grant_type!!!!")

    user_b_changes_after_grant = client.get_changes(url=sg_url, db=sg_db,
                                                    since=user_b_changes["last_seq"],
                                                    auth=user_b_session, feed="normal")

    # User B should have received 51 docs (a_docs + 1 _user/USER_B doc) if a REST grant,
    # or 50 changes if the grant is via the sync function
    changes_results = user_b_changes_after_grant["results"]
    assert 50 <= len(changes_results) <= 51

    # Create a dictionary of id / rev pairs for all the docs from changes that are not "_user/" docs
    ids_and_revs_from_user_changes = {
        change["id"]: change["changes"][0]["rev"]
        for change in changes_results
        if not change["id"].startswith("_user/")
    }
    assert len(ids_and_revs_from_user_changes) == 50

    # Create a dictionary of id / rev pairs for all of the channel A docs
    ids_and_revs_from_a_docs = {doc["id"]: doc["rev"] for doc in a_docs}
    assert len(ids_and_revs_from_a_docs) == 50

    # Check that the changes and the a_docs are identical in id and rev
    assert ids_and_revs_from_user_changes == ids_and_revs_from_a_docs

    # Get changes from the last_seq of the changes request after the grant. There should be no new changes
    user_b_changes = client.get_changes(url=sg_url, db=sg_db,
                                        since=user_b_changes_after_grant["last_seq"],
                                        auth=user_b_session, feed="normal")
    assert len(user_b_changes["results"]) == 0
def test_peer_2_peer_sanity(setup_p2p_test):
    """
    Sanity P2P Scenario
    1. Launch LiteServ 1 and LiteServ 2
    2. Create a database on each LiteServ
    3. Start continuous push pull replication from each db to the other
    4. Add docs to each db
    5. Verify the docs show up at each db
    6. Verify the docs show up in the database's changes feed.
    """

    ls_url_one = setup_p2p_test["ls_url_one"]
    ls_url_two = setup_p2p_test["ls_url_two"]

    num_docs_per_db = 1000

    log_info("ls_url_one: {}".format(ls_url_one))
    log_info("ls_url_two: {}".format(ls_url_two))

    client = MobileRestClient()

    log_info("Creating databases")
    ls_db1 = client.create_database(url=ls_url_one, name="ls_db1")
    ls_db2 = client.create_database(url=ls_url_two, name="ls_db2")

    # Setup continuous push / pull replication from LiteServ 1 ls_db1 to LiteServ 2 ls_db2
    repl_one = client.start_replication(
        url=ls_url_one,
        continuous=True,
        from_db=ls_db1,
        to_url=ls_url_two, to_db=ls_db2
    )
    repl_two = client.start_replication(
        url=ls_url_one,
        continuous=True,
        from_url=ls_url_two, from_db=ls_db2,
        to_db=ls_db1
    )

    # Setup continuous push / pull replication from LiteServ 2 ls_db2 to LiteServ 1 ls_db1
    repl_three = client.start_replication(
        url=ls_url_two,
        continuous=True,
        from_db=ls_db2,
        to_url=ls_url_one, to_db=ls_db1
    )
    repl_four = client.start_replication(
        url=ls_url_two,
        continuous=True,
        from_url=ls_url_one, from_db=ls_db1,
        to_db=ls_db2
    )

    client.wait_for_replication_status_idle(url=ls_url_one, replication_id=repl_one)
    client.wait_for_replication_status_idle(url=ls_url_one, replication_id=repl_two)
    client.wait_for_replication_status_idle(url=ls_url_two, replication_id=repl_three)
    client.wait_for_replication_status_idle(url=ls_url_two, replication_id=repl_four)

    ls_url_one_replications = client.get_replications(ls_url_one)
    assert len(ls_url_one_replications) == 2

    ls_url_two_replications = client.get_replications(ls_url_two)
    assert len(ls_url_two_replications) == 2

    ls_db1_docs = client.add_docs(url=ls_url_one, db=ls_db1, number=num_docs_per_db, id_prefix="test_ls_db1")
    assert len(ls_db1_docs) == num_docs_per_db

    ls_db2_docs = client.add_docs(url=ls_url_two, db=ls_db2, number=num_docs_per_db, id_prefix="test_ls_db2")
    assert len(ls_db2_docs) == num_docs_per_db

    all_docs = client.merge(ls_db1_docs, ls_db2_docs)
    assert len(all_docs) == num_docs_per_db * 2

    client.verify_docs_present(url=ls_url_one, db=ls_db1, expected_docs=all_docs)
    client.verify_docs_present(url=ls_url_two, db=ls_db2, expected_docs=all_docs)

    client.verify_docs_in_changes(url=ls_url_one, db=ls_db1, expected_docs=all_docs)
    client.verify_docs_in_changes(url=ls_url_two, db=ls_db2, expected_docs=all_docs)
def test_auto_prune_listener_keeps_conflicts_sanity(setup_client_syncgateway_test):
    """
    1. Create db on LiteServ and add docs
    2. Create db on sync_gateway and add docs with the same id
    3. Create one shot push / pull replication
    4. Update the doc on LiteServ past the revs limit
    5. Assert that the pruned conflict is still present
    6. Delete the current revision and check that a GET returns the old conflict as the current rev
    """

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    sg_mode = setup_client_syncgateway_test["sg_mode"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_url = setup_client_syncgateway_test["sg_url"]
    sg_admin_url = setup_client_syncgateway_test["sg_admin_url"]
    client = MobileRestClient()

    sg_config = sync_gateway_config_path_for_mode("listener_tests/listener_tests", sg_mode)
    c = cluster.Cluster(config=cluster_config)
    c.reset(sg_config_path=sg_config)

    log_info("Running 'test_auto_prune_listener_keeps_conflicts_sanity' ...")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_admin_url: {}".format(sg_admin_url))

    num_docs = 1
    num_revs = 100

    sg_db = "db"
    ls_db = "ls_db"
    sg_user_name = "sg_user"
    sg_user_channels = ["NBC"]

    client.create_user(url=sg_admin_url, db=sg_db, name=sg_user_name, password="******", channels=sg_user_channels)
    sg_session = client.create_session(url=sg_admin_url, db=sg_db, name=sg_user_name)

    ls_db = client.create_database(url=ls_url, name=ls_db)

    # Create docs with the same id on each side to create conflicts when the dbs complete one shot replication
    ls_db_docs = client.add_docs(url=ls_url, db=ls_db, number=num_docs, id_prefix="doc", channels=sg_user_channels)
    assert len(ls_db_docs) == num_docs

    sg_db_docs = client.add_docs(
        url=sg_url, db=sg_db, number=num_docs, id_prefix="doc",
        channels=sg_user_channels, auth=sg_session
    )
    assert len(sg_db_docs) == num_docs

    # Wait for changes to be available on Sync Gateway
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=sg_db_docs, auth=sg_session, polling_interval=1)

    # Setup one shot pull replication and wait for it to complete
    client.start_replication(url=ls_url, continuous=False, from_url=sg_admin_url, from_db=sg_db, to_db=ls_db)
    client.wait_for_no_replications(url=ls_url)

    # There should now be a conflict on the client
    conflicting_revs = client.get_conflict_revs(url=ls_url, db=ls_db, doc=ls_db_docs[0])

    # Get the doc with conflict rev
    client.get_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"], rev=conflicting_revs[0])

    # Update doc past revs limit and make sure conflict is still available
    updated_doc = client.update_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"], number_updates=num_revs)
    client.get_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"], rev=conflicting_revs[0])

    # Delete doc and ensure that the conflict is now the current rev
    client.delete_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"], rev=updated_doc["rev"])
    current_doc = client.get_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"])
    assert current_doc["_rev"] == conflicting_revs[0]
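
# get_conflict_revs() above presumably relies on the CouchDB-style ?conflicts=true query
# parameter, which makes the Listener include a "_conflicts" array of non-current leaf
# revisions in the document body. A minimal sketch of reading and then fetching a
# conflicting revision, assuming `requests`; the helper names are illustrative.
import requests


def get_conflicting_revs(ls_url, db, doc_id):
    """Return the non-winning leaf revisions of a document, if any."""
    resp = requests.get("{}/{}/{}".format(ls_url, db, doc_id),
                        params={"conflicts": "true"})
    resp.raise_for_status()
    return resp.json().get("_conflicts", [])


def get_doc_at_rev(ls_url, db, doc_id, rev):
    """Fetch a specific revision of a document."""
    resp = requests.get("{}/{}/{}".format(ls_url, db, doc_id), params={"rev": rev})
    resp.raise_for_status()
    return resp.json()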
def test_rebalance_sanity(params_from_base_test_setup):

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_version = get_sg_version(cluster_config)
    if compare_versions(sg_version, '1.5') < 0:
        pytest.skip("This test needs multiple URLs in the SG config, not supported by SG < 1.5")

    cluster_helper = ClusterKeywords()

    sg_conf_name = "sync_gateway_default_functional_tests"
    sg_conf_path = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper.reset_cluster(cluster_config=cluster_config,
                                 sync_gateway_config=sg_conf_path)

    topology = cluster_helper.get_cluster_topology(cluster_config)

    admin_sg_one = topology["sync_gateways"][0]["admin"]
    sg_one_url = topology["sync_gateways"][0]["public"]

    cluster_servers = topology["couchbase_servers"]
    cbs_one_url = cluster_servers[0]
    cbs_two_url = cluster_servers[1]

    log_info("Running: 'test_rebalance_sanity'")
    log_info("cluster_config: {}".format(cluster_config))
    log_info("admin_sg: {}".format(admin_sg_one))
    log_info("sg_url: {}".format(sg_one_url))
    log_info("cbs_one_url: {}".format(cbs_one_url))
    log_info("cbs_two_url: {}".format(cbs_two_url))

    sg_db = "db"
    num_docs = 100
    num_updates = 100
    sg_user_name = "seth"
    sg_user_password = "******"
    channels = ["ABC", "CBS"]

    client = MobileRestClient()

    cb_server = couchbaseserver.CouchbaseServer(cbs_one_url)
    server_to_remove = couchbaseserver.CouchbaseServer(cbs_two_url)

    client.create_user(admin_sg_one, sg_db, sg_user_name, sg_user_password, channels=channels)
    session = client.create_session(admin_sg_one, sg_db, sg_user_name)

    with concurrent.futures.ThreadPoolExecutor(5) as executor:
        # Add docs to sg
        log_info("Adding docs to sync_gateway")
        docs = client.add_docs(sg_one_url, sg_db, num_docs, "test_doc", channels=channels, auth=session)
        assert len(docs) == num_docs

        # Start updating docs while rebalancing out one CBS node
        log_info("Updating docs on sync_gateway")
        update_docs_task = executor.submit(client.update_docs, sg_one_url, sg_db, docs, num_updates, auth=session)

        # Run rebalance while the updates are in flight
        cb_server.rebalance_out(cluster_servers, server_to_remove)

        updated_docs = update_docs_task.result()
        log_info(updated_docs)

    # Verify docs / revisions present
    client.verify_docs_present(sg_one_url, sg_db, updated_docs, auth=session)

    # Verify docs revisions in changes feed
    client.verify_docs_in_changes(sg_one_url, sg_db, updated_docs, auth=session)

    # Rebalance server back in to the pool
    cb_server.add_node(server_to_remove)
    cb_server.rebalance_in(cluster_servers, server_to_remove)
def test_server_goes_down_rebuild_channels(params_from_base_test_setup):
    """
    1. Start with a two node couchbase server cluster
    2. Start adding docs
    3. Kill one of the server nodes and signal completion
    4. Stop adding docs
    5. Verify that the expected docs are present and in the changes feed.
    6. Start server again and add to cluster
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_version = get_sg_version(cluster_config)
    if compare_versions(sg_version, '1.5') < 0:
        pytest.skip("This test needs multiple URLs in the SG config, not supported by SG < 1.5")

    cluster_helper = ClusterKeywords()

    sg_conf_name = "sync_gateway_default_functional_tests"
    sg_conf_path = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper.reset_cluster(cluster_config=cluster_config,
                                 sync_gateway_config=sg_conf_path)

    topology = cluster_helper.get_cluster_topology(cluster_config)

    admin_sg = topology["sync_gateways"][0]["admin"]
    sg_url = topology["sync_gateways"][0]["public"]

    couchbase_servers = topology["couchbase_servers"]
    cbs_one_url = couchbase_servers[0]
    cbs_two_url = couchbase_servers[1]

    log_info("Running: 'test_server_goes_down_rebuild_channels'")
    log_info("cluster_config: {}".format(cluster_config))
    log_info("admin_sg: {}".format(admin_sg))
    log_info("sg_url: {}".format(sg_url))
    log_info("cbs_one_url: {}".format(cbs_one_url))
    log_info("cbs_two_url: {}".format(cbs_two_url))

    sg_db = "db"
    num_docs = 100

    admin_user_info = userinfo.UserInfo(
        name="admin",
        password="******",
        channels=["ABC"],
        roles=[]
    )
    seth_user_info = userinfo.UserInfo(
        name="seth",
        password="******",
        channels=["ABC"],
        roles=[]
    )

    client = MobileRestClient()
    main_server = couchbaseserver.CouchbaseServer(cbs_one_url)
    flakey_server = couchbaseserver.CouchbaseServer(cbs_two_url)

    admin_auth = client.create_user(
        admin_sg,
        sg_db,
        admin_user_info.name,
        admin_user_info.password,
        channels=admin_user_info.channels
    )

    client.create_user(
        admin_sg,
        sg_db,
        seth_user_info.name,
        seth_user_info.password,
        channels=seth_user_info.channels
    )
    seth_session = client.create_session(admin_sg, sg_db, seth_user_info.name)

    # Allow any user docs to make it to changes
    initial_changes = client.get_changes(url=sg_url, db=sg_db, since=0, auth=seth_session)

    # Push docs from admin
    docs = client.add_docs(
        url=sg_url,
        db=sg_db,
        number=num_docs,
        id_prefix=None,
        channels=admin_user_info.channels,
        auth=admin_auth
    )
    assert len(docs) == num_docs

    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=docs, auth=seth_session)

    changes_before_failover = client.get_changes(url=sg_url, db=sg_db,
                                                 since=initial_changes["last_seq"], auth=seth_session)
    assert len(changes_before_failover["results"]) == num_docs

    # Stop server via 'service stop'
    flakey_server.stop()

    start = time.time()
    while True:
        # Fail the test if all docs do not succeed before the timeout
        if (time.time() - start) > 60:
            # Bring server back up before failing the test
            flakey_server.start()
            main_server.recover(flakey_server)
            main_server.rebalance_in(couchbase_servers, flakey_server)
            raise keywords.exceptions.TimeoutError("Failed to rebuild changes")

        try:
            # Poll until failover happens (~30 seconds)
            client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=docs, auth=seth_session)
            # Changes request succeeded, exit the loop
            break
        except requests.exceptions.HTTPError:
            # Changes will fail until failover of the down server happens. Wait and try again.
            log_info("/db/_changes failed due to server down. Retrying ...")
            time.sleep(1)

    # Verify no new changes
    changes = client.get_changes(
        url=sg_url,
        db=sg_db,
        since=changes_before_failover["last_seq"],
        auth=seth_session,
        feed="normal"
    )
    assert len(changes["results"]) == 0

    # Check that all changes are intact from the initial changes request
    changes = client.get_changes(url=sg_url, db=sg_db, since=initial_changes["last_seq"], auth=seth_session)
    assert len(changes["results"]) == num_docs

    # Test succeeded without timeout, bring server back into topology
    flakey_server.start()
    main_server.recover(flakey_server)
    main_server.rebalance_in(couchbase_servers, flakey_server)
def test_server_goes_down_sanity(params_from_base_test_setup):
    """
    1. Start with a two node couchbase server cluster
    2. Start adding docs
    3. Kill one of the server nodes and signal completion
    4. Stop adding docs
    5. Verify that the expected docs are present and in the changes feed.
    6. Start server again and add to cluster
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_version = get_sg_version(cluster_config)
    if compare_versions(sg_version, '1.5') < 0:
        pytest.skip("This test needs multiple URLs in the SG config, not supported by SG < 1.5")

    cluster_helper = ClusterKeywords()

    sg_conf_name = "sync_gateway_default_functional_tests"
    sg_conf_path = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper.reset_cluster(cluster_config=cluster_config,
                                 sync_gateway_config=sg_conf_path)

    topology = cluster_helper.get_cluster_topology(cluster_config)

    admin_sg = topology["sync_gateways"][0]["admin"]
    sg_url = topology["sync_gateways"][0]["public"]

    couchbase_servers = topology["couchbase_servers"]
    cbs_one_url = couchbase_servers[0]
    cbs_two_url = couchbase_servers[1]

    log_info("Running: 'test_server_goes_down_sanity'")
    log_info("cluster_config: {}".format(cluster_config))
    log_info("admin_sg: {}".format(admin_sg))
    log_info("sg_url: {}".format(sg_url))
    log_info("cbs_one_url: {}".format(cbs_one_url))
    log_info("cbs_two_url: {}".format(cbs_two_url))

    sg_db = "db"
    num_docs = 100
    sg_user_name = "seth"
    sg_user_password = "******"
    channels = ["ABC", "CBS"]

    client = MobileRestClient()
    main_server = couchbaseserver.CouchbaseServer(cbs_one_url)
    flakey_server = couchbaseserver.CouchbaseServer(cbs_two_url)

    client.create_user(admin_sg, sg_db, sg_user_name, sg_user_password, channels=channels)
    session = client.create_session(admin_sg, sg_db, sg_user_name)

    # Stop second server
    flakey_server.stop()

    # Try to add 100 docs in a loop until all succeed; if they never do, fail with a timeout
    failed_attempts = 0

    # Wait 30 seconds for auto failover
    # (Minimum value suggested - http://docs.couchbase.com/admin/admin/Tasks/tasks-nodeFailover.html)
    # + 15 seconds to add docs
    timeout = 45
    start = time.time()

    successful_add = False
    while not successful_add:
        # Fail the test if all docs do not succeed before the timeout
        if (time.time() - start) > timeout:
            # Bring server back up before failing the test
            flakey_server.start()
            main_server.rebalance_in(couchbase_servers, flakey_server)
            raise TimeoutError("Failed to successfully put docs before timeout")

        try:
            docs = client.add_docs(url=sg_url, db=sg_db, number=num_docs, id_prefix=None,
                                   auth=session, channels=channels)
            # If the add docs call above does not throw, it was a successful add
            successful_add = True
        except requests.exceptions.HTTPError as he:
            failed_attempts += 1
            log_info("Failed to add docs: {}".format(he))
            log_info("Failed attempts so far: {}".format(failed_attempts))
            time.sleep(1)

    assert len(docs) == num_docs

    client.verify_docs_present(url=sg_url, db=sg_db, expected_docs=docs, auth=session)

    try:
        client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=docs, auth=session, polling_interval=5)
    except keywords.exceptions.TimeoutException:
        # Timed out verifying docs. Bring the server back in to restore the topology, then fail.
        # Failing due to https://github.com/couchbase/sync_gateway/issues/2197
        flakey_server.start()
        main_server.recover(flakey_server)
        main_server.rebalance_in(couchbase_servers, flakey_server)
        raise keywords.exceptions.TimeoutException("Failed to get all changes")

    # Test succeeded without timeout, bring server back into topology
    flakey_server.start()
    main_server.recover(flakey_server)
    main_server.rebalance_in(couchbase_servers, flakey_server)

    log_info("test_server_goes_down_sanity complete!")
def test_awaken_backfill_channels_longpoll_changes_with_limit(params_from_base_test_setup, sg_conf_name, grant_type):
    """
    Test that checks that docs are backfilled for longpoll changes with limit for an access grant (via REST or SYNC)

    CHANNEL-REST = Channel is granted to user via REST
    CHANNEL-SYNC = Channel is granted to user via sync function access()
    ROLE-REST = Role is granted to user via REST
    ROLE-SYNC = Role is granted to user via sync function role()
    CHANNEL-TO-ROLE-REST = Channel is added to existing role via REST
    CHANNEL-TO-ROLE-SYNC = Channel is added to existing role via sync access()
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"

    log_info("grant_type: {}".format(grant_type))

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster = Cluster(cluster_config)
    cluster.reset(sg_conf)

    client = MobileRestClient()

    admin_user_info = userinfo.UserInfo("admin", "pass", channels=["A"], roles=[])

    if grant_type == "CHANNEL-TO-ROLE-REST" or grant_type == "CHANNEL-TO-ROLE-SYNC":
        client.create_role(url=sg_admin_url, db=sg_db, name="empty_role", channels=[])
        user_b_user_info = userinfo.UserInfo("USER_B", "pass", channels=["B"], roles=["empty_role"])
    else:
        user_b_user_info = userinfo.UserInfo("USER_B", "pass", channels=["B"], roles=[])

    # Create users / sessions
    client.create_user(
        url=sg_admin_url,
        db=sg_db,
        name=admin_user_info.name,
        password=admin_user_info.password,
        channels=admin_user_info.channels,
    )
    client.create_user(url=sg_admin_url, db=sg_db,
                       name=user_b_user_info.name, password=user_b_user_info.password,
                       channels=user_b_user_info.channels, roles=user_b_user_info.roles)

    admin_session = client.create_session(url=sg_admin_url, db=sg_db,
                                          name=admin_user_info.name, password=admin_user_info.password)
    user_b_session = client.create_session(url=sg_admin_url, db=sg_db,
                                           name=user_b_user_info.name, password=user_b_user_info.password)

    # Create 50 "A" channel docs
    a_docs = client.add_docs(url=sg_url, db=sg_db, number=50, id_prefix=None, auth=admin_session, channels=["A"])
    assert len(a_docs) == 50

    b_docs = client.add_docs(url=sg_url, db=sg_db, number=1, id_prefix="b_doc", auth=user_b_session, channels=["B"])
    assert len(b_docs) == 1

    user_doc = {"id": "_user/USER_B", "rev": None}
    b_docs.append(user_doc)

    # Loop until user_b sees b_doc_0 doc and _user/USER_B doc
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=b_docs, auth=user_b_session, strict=True)

    # Create a dictionary keyed on doc id for all of the channel A docs
    ids_and_revs_from_a_docs = {doc["id"]: doc["rev"] for doc in a_docs}
    assert len(ids_and_revs_from_a_docs.keys()) == 50

    # Get last_seq for user_b
    user_b_changes = client.get_changes(url=sg_url, db=sg_db, since=0, auth=user_b_session, feed="normal")

    with concurrent.futures.ThreadPoolExecutor(max_workers=10) as ex:

        # Start a longpoll changes feed
        changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db,
                                 since=user_b_changes["last_seq"], auth=user_b_session,
                                 timeout=10, limit=20)

        # Grant access to channel "A"
        if grant_type == "CHANNEL-REST":
            log_info("Granting user access to channel A via Admin REST user update")
            # Grant via update to user in Admin API
            client.update_user(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, channels=["A", "B"])

        elif grant_type == "CHANNEL-SYNC":
            log_info("Granting user access to channel A via sync function access()")
            # Grant via access() in the sync function; the doc id 'channel_access'
            # will trigger an access(doc.users, doc.channels) call
            access_doc = document.create_doc("channel_access", channels=["A"])
            access_doc["users"] = ["USER_B"]
            client.add_doc(url=sg_url, db=sg_db, doc=access_doc, auth=admin_session)

        elif grant_type == "ROLE-REST":
            log_info("Granting user access to channel A via Admin REST role grant")
            # Create role with channel A
            client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"])
            client.update_user(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, roles=["channel-A-role"])

        elif grant_type == "ROLE-SYNC":
            log_info("Granting user access to channel A via sync function role() grant")
            # Create role with channel A
            client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"])
            # Grant via role() in the sync function; the doc id 'role_access'
            # will trigger a role(doc.users, doc.roles) call
            role_access_doc = document.create_doc("role_access")
            role_access_doc["users"] = ["USER_B"]
            role_access_doc["roles"] = ["role:channel-A-role"]
            client.add_doc(sg_url, db=sg_db, doc=role_access_doc, auth=admin_session)

        elif grant_type == "CHANNEL-TO-ROLE-REST":
            # Update the empty_role to have channel "A"
            client.update_role(url=sg_admin_url, db=sg_db, name="empty_role", channels=["A"])

        elif grant_type == "CHANNEL-TO-ROLE-SYNC":
            # Grant empty_role access to channel "A" via the sync function
            access_doc = document.create_doc("channel_grant_to_role")
            access_doc["roles"] = ["role:empty_role"]
            access_doc["channels"] = ["A"]
            client.add_doc(url=sg_url, db=sg_db, doc=access_doc, auth=admin_session, use_post=True)

        else:
            pytest.fail("Unsupported grant_type!!!!")

        # Block on return of longpoll changes; the feed should wake up and return 20 results
        changes = changes_task.result()

    assert len(changes["results"]) == 20
    num_requests = 1

    # Append the _user/ doc to the doc scratch pad if this is a REST grant
    if grant_type == "CHANNEL-REST" or grant_type == "ROLE-REST":
        ids_and_revs_from_a_docs["_user/USER_B"] = None

    # Cross the results off from the 'a_docs' dictionary
    for doc in changes["results"]:
        del ids_and_revs_from_a_docs[doc["id"]]

    # Start looping longpoll changes with limit, crossing off changes from the dictionary
    # each time one is found. Since 20 changes should be crossed off already, this should
    # only take 2 more requests.
    log_info("Starting looping longpoll changes with limit!")
    last_seq = changes["last_seq"]
    while True:

        if len(ids_and_revs_from_a_docs.keys()) == 0:
            log_info("All docs were found! Exiting polling loop")
            break

        changes = client.get_changes(url=sg_url, db=sg_db, since=last_seq, auth=user_b_session,
                                     limit=20, timeout=10)
        num_requests += 1

        # If more than 3 requests are needed to drain the changes, throw an exception
        if num_requests == 2:
            assert len(changes["results"]) == 20
        elif num_requests == 3:
            # This will be 10 or 11 depending on whether the _user/ doc is returned
            if grant_type == "CHANNEL-REST" or grant_type == "ROLE-REST":
                assert len(changes["results"]) == 11
            else:
                assert len(changes["results"]) == 10
        else:
            raise exceptions.ChangesError("Looping longpoll should only have to perform 3 requests to get all the changes!!")

        # Cross the results off from the 'a_docs' dictionary.
        # This will blow up if duplicate docs are sent to changes.
        for doc in changes["results"]:
            del ids_and_revs_from_a_docs[doc["id"]]

        last_seq = changes["last_seq"]

    # Changes after the longpoll loop
    zero_results = client.get_changes(url=sg_url, db=sg_db, since=last_seq,
                                      auth=user_b_session, feed="normal")

    # Changes should be caught up and there should be no results
    assert len(zero_results["results"]) == 0
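
# The looping longpoll pattern above caps each response at `limit` results, so a backfill
# of N docs drains in ceil(N / limit) requests. A minimal sketch of a single longpoll
# request is below, assuming `requests` and cookie-based session auth (e.g.
# {"SyncGatewaySession": "..."}). Note that Sync Gateway's `timeout` parameter on
# _changes is expressed in milliseconds, while the test helper above takes seconds.
import requests


def longpoll_changes(sg_url, db, since, limit, timeout_ms, session_cookies):
    """Issue one longpoll _changes request and return (results, last_seq)."""
    params = {"feed": "longpoll", "since": since, "limit": limit, "timeout": timeout_ms}
    resp = requests.get("{}/{}/_changes".format(sg_url, db),
                        params=params, cookies=session_cookies)
    resp.raise_for_status()
    body = resp.json()
    return body["results"], body["last_seq"]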
def test_longpoll_awaken_channels(params_from_base_test_setup, sg_conf_name):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    cluster_topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    sg_admin_url = cluster_topology["sync_gateways"][0]["admin"]
    sg_url = cluster_topology["sync_gateways"][0]["public"]

    log_info("sg_conf: {}".format(sg_conf))
    log_info("sg_admin_url: {}".format(sg_admin_url))
    log_info("sg_url: {}".format(sg_url))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    adam_user_info = userinfo.UserInfo(name="adam", password="******", channels=["NBC", "ABC"], roles=[])
    traun_user_info = userinfo.UserInfo(name="traun", password="******", channels=[], roles=[])
    andy_user_info = userinfo.UserInfo(name="andy", password="******", channels=[], roles=[])
    sg_db = "db"
    doc_id = "adam_doc_0"

    client = MobileRestClient()

    adam_auth = client.create_user(url=sg_admin_url, db=sg_db, name=adam_user_info.name, password=adam_user_info.password, channels=adam_user_info.channels)
    traun_auth = client.create_user(url=sg_admin_url, db=sg_db, name=traun_user_info.name, password=traun_user_info.password, channels=traun_user_info.channels)
    andy_auth = client.create_user(url=sg_admin_url, db=sg_db, name=andy_user_info.name, password=andy_user_info.password, channels=andy_user_info.channels)

    ############################################################
    # changes feed wakes with Channel Access via Admin API
    ############################################################

    # Get starting sequence of docs, use the last seq to progress past any _user docs.
    adam_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=adam_auth)
    traun_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=traun_auth)
    andy_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=andy_auth)

    with concurrent.futures.ProcessPoolExecutor() as ex:
        # Start changes feed for 3 users
        adam_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=adam_changes["last_seq"], timeout=10, auth=adam_auth)
        traun_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=traun_changes["last_seq"], timeout=10, auth=traun_auth)
        andy_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=andy_changes["last_seq"], timeout=10, auth=andy_auth)

        # Wait for the changes feeds to notice there are no changes and enter wait. 2 seconds should be more than enough.
        time.sleep(2)

        # Make sure the changes futures are still running and have not exited due to any new changes;
        # the feeds should be caught up and waiting
        assert not adam_changes_task.done()
        assert not traun_changes_task.done()
        assert not andy_changes_task.done()

        # Add a doc for adam with the "NBC" and "ABC" channels.
        # Adding one doc should wake up the changes feed.
        adam_add_docs_task = ex.submit(client.add_docs, url=sg_url, db=sg_db, number=1, id_prefix="adam_doc", auth=adam_auth, channels=adam_user_info.channels)

        # Wait for the doc add to complete
        adam_docs = adam_add_docs_task.result()
        assert len(adam_docs) == 1

        # Assert that the changes feed woke up and that the doc change was propagated
        adam_changes = adam_changes_task.result()
        assert len(adam_changes["results"]) == 1
        assert adam_changes["results"][0]["id"] == doc_id

        # Verify that the changes feed is still listening for Traun and Andy
        assert not traun_changes_task.done()
        assert not andy_changes_task.done()

        # Update traun and andy to have one of adam's channels
        update_traun_user_task = ex.submit(client.update_user, url=sg_admin_url, db=sg_db, name=traun_user_info.name, password=traun_user_info.password, channels=["NBC"])
        traun_auth = update_traun_user_task.result()

        update_andy_user_task = ex.submit(client.update_user, url=sg_admin_url, db=sg_db, name=andy_user_info.name, password=andy_user_info.password, channels=["ABC"])
        andy_auth = update_andy_user_task.result()

        # Make sure the changes feeds wake up and contain at least one change; 2 are possible if the _user doc is included.
        # Make sure the first change is 'adam_doc_0' or the _user doc.
        traun_changes = traun_changes_task.result()
        assert 1 <= len(traun_changes["results"]) <= 2
        assert traun_changes["results"][0]["id"] == "adam_doc_0" or traun_changes["results"][0]["id"] == "_user/traun"

        andy_changes = andy_changes_task.result()
        assert 1 <= len(andy_changes["results"]) <= 2
        assert andy_changes["results"][0]["id"] == "adam_doc_0" or andy_changes["results"][0]["id"] == "_user/andy"

    # Block until user docs are seen
    client.verify_doc_id_in_changes(url=sg_url, db=sg_db, expected_doc_id="_user/adam", auth=adam_auth)
    client.verify_doc_id_in_changes(url=sg_url, db=sg_db, expected_doc_id="_user/traun", auth=traun_auth)
    client.verify_doc_id_in_changes(url=sg_url, db=sg_db, expected_doc_id="_user/andy", auth=andy_auth)

    # Make sure that adam's doc shows up in changes, since the changes feed may have been woken up by a _user doc above
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=adam_docs, auth=adam_auth)
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=adam_docs, auth=traun_auth)
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=adam_docs, auth=andy_auth)

    ############################################################
    # changes feed wakes with Channel Removal via Sync function
    ############################################################

    # Get latest last_seq for next test section
    adam_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=adam_auth)
    traun_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=traun_auth)
    andy_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=andy_auth)

    with concurrent.futures.ProcessPoolExecutor() as ex:
        # Start changes feed for 3 users from latest last_seq
        adam_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=adam_changes["last_seq"], timeout=10, auth=adam_auth)
        traun_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=traun_changes["last_seq"], timeout=10, auth=traun_auth)
        andy_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=andy_changes["last_seq"], timeout=10, auth=andy_auth)

        # Wait for the changes feeds to notice there are no changes and enter wait. 2 seconds should be more than enough.
        time.sleep(2)

        # Make sure the changes futures are still running and have not exited due to any new changes;
        # the feeds should be caught up and waiting
        assert not adam_changes_task.done()
        assert not traun_changes_task.done()
        assert not andy_changes_task.done()

        # Remove the channels property from the doc
        client.update_doc(url=sg_url, db=sg_db, doc_id=doc_id, auth=traun_auth, channels=[])

        # All three changes feeds should wake up and return one result
        adam_changes = adam_changes_task.result()
        assert len(adam_changes["results"]) == 1
        assert adam_changes["results"][0]["removed"] == ["ABC", "NBC"]

        traun_changes = traun_changes_task.result()
        assert len(traun_changes["results"]) == 1
        assert traun_changes["results"][0]["removed"] == ["NBC"]

        andy_changes = andy_changes_task.result()
        assert len(andy_changes["results"]) == 1
        assert andy_changes["results"][0]["removed"] == ["ABC"]

    # Verify that the users can no longer access the doc
    for user_auth in [adam_auth, traun_auth, andy_auth]:
        with pytest.raises(requests.exceptions.HTTPError) as excinfo:
            client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id, auth=user_auth)
        assert "403 Client Error: Forbidden for url:" in str(excinfo.value)

    ############################################################
    # changes feed wakes with Channel Grant via Sync function
    ############################################################

    # Get latest last_seq for next test section
    adam_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=adam_auth)
    traun_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=traun_auth)
    andy_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=andy_auth)

    admin_auth = client.create_user(url=sg_admin_url, db=sg_db, name="admin", password="******", channels=["admin"])

    channel_grant_doc_id = "channel_grant_with_doc_intially"

    # Add another doc, initially only in the 'admin' channel
    channel_grant_doc_body = document.create_doc(doc_id=channel_grant_doc_id, channels=["admin"])
    client.add_doc(url=sg_url, db=sg_db, doc=channel_grant_doc_body, auth=admin_auth)

    with concurrent.futures.ProcessPoolExecutor() as ex:
        # Start changes feed for 3 users from latest last_seq
        adam_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=adam_changes["last_seq"], timeout=10, auth=adam_auth)
        traun_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=traun_changes["last_seq"], timeout=10, auth=traun_auth)
        andy_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=andy_changes["last_seq"], timeout=10, auth=andy_auth)

        # Wait for the changes feeds to notice there are no changes and enter wait. 2 seconds should be more than enough.
        time.sleep(2)

        # Make sure the changes futures are still running and have not exited due to any new changes;
        # the feeds should be caught up and waiting
        assert not adam_changes_task.done()
        assert not traun_changes_task.done()
        assert not andy_changes_task.done()

        # Update the grant doc to have a channel for each user
        update_task = ex.submit(client.update_doc, url=sg_url, db=sg_db, doc_id=channel_grant_doc_id, auth=admin_auth, channels=["admin", "ABC", "NBC"])
        updated_doc = update_task.result()
        assert updated_doc["rev"].startswith("2-")

        # Verify that the access grant wakes up the changes feeds for adam, traun, and andy
        adam_changes = adam_changes_task.result()
        assert len(adam_changes["results"]) == 1
        assert adam_changes["results"][0]["id"] == "channel_grant_with_doc_intially"
        assert adam_changes["results"][0]["changes"][0]["rev"].startswith("2-")

        traun_changes = traun_changes_task.result()
        assert len(traun_changes["results"]) == 1
        assert traun_changes["results"][0]["id"] == "channel_grant_with_doc_intially"
        assert traun_changes["results"][0]["changes"][0]["rev"].startswith("2-")

        andy_changes = andy_changes_task.result()
        assert len(andy_changes["results"]) == 1
        assert andy_changes["results"][0]["id"] == "channel_grant_with_doc_intially"
        assert andy_changes["results"][0]["changes"][0]["rev"].startswith("2-")
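# The test above repeats one pattern several times: park a longpoll _changes request on an
# executor, prove it is still blocked, perform a mutation, then assert the feed wakes with the
# expected change. A minimal, hypothetical sketch of that pattern follows; 'get_changes' stands
# in for any callable with MobileRestClient.get_changes semantics and the helper itself is not
# part of this suite.
def awaken_changes_feed(ex, get_changes, mutate, settle_secs=2, **changes_kwargs):
    """Submit a longpoll changes request, verify it blocks, run 'mutate' to wake it,
    and return the changes response. 'ex' is a concurrent.futures executor and
    'mutate' is a zero-arg callable that produces the change."""
    import time

    changes_task = ex.submit(get_changes, **changes_kwargs)

    # Give the feed time to catch up and enter its wait state
    time.sleep(settle_secs)
    assert not changes_task.done(), "Feed returned before any mutation was made"

    # Trigger the change (doc add, channel grant, role update, ...)
    mutate()

    # Block until the feed wakes and returns
    return changes_task.result()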
def test_backfill_channel_grant_to_role_longpoll(params_from_base_test_setup, sg_conf_name, grant_type, channels_to_grant):
    """
    Test that docs are backfilled for a channel grant (via REST or SYNC) to an existing role

    1. Create a 'grantee' user with an empty role
    2. 'pusher' user adds docs with channel(s) that will later be granted to 'grantee'
    3. Verify that the 'pusher' sees the docs on its changes feed
    4. Grant the 'grantee's role access to the pusher's channels (either via REST or via the sync function)
    5. Verify that 'grantee' gets all of the docs after the grant
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    num_docs_per_channel = 100
    empty_role_name = "empty_role"

    log_info("grant_type: {}".format(grant_type))
    log_info("channels to grant access to: {}".format(channels_to_grant))

    is_multi_channel_grant = False
    if len(channels_to_grant) == 3:
        is_multi_channel_grant = True
    log_info("is_multi_channel_grant: {}".format(is_multi_channel_grant))

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster = Cluster(cluster_config)
    cluster.reset(sg_conf)

    client = MobileRestClient()

    client.create_role(url=sg_admin_url, db=sg_db, name=empty_role_name, channels=[])

    pusher_info = userinfo.UserInfo("pusher", "pass", channels=channels_to_grant, roles=[])
    grantee_info = userinfo.UserInfo("grantee", "pass", channels=[], roles=[empty_role_name])

    # Create users
    client.create_user(url=sg_admin_url, db=sg_db, name=pusher_info.name, password=pusher_info.password, channels=pusher_info.channels, roles=pusher_info.roles)
    pusher_session = client.create_session(url=sg_admin_url, db=sg_db, name=pusher_info.name, password=pusher_info.password)

    client.create_user(url=sg_admin_url, db=sg_db, name=grantee_info.name, password=grantee_info.password, channels=grantee_info.channels, roles=grantee_info.roles)
    grantee_session = client.create_session(url=sg_admin_url, db=sg_db, name=grantee_info.name, password=grantee_info.password)

    pusher_changes = client.get_changes(url=sg_url, db=sg_db, since=0, auth=pusher_session)

    # Make sure the _user doc shows up in the changes feed
    assert len(pusher_changes["results"]) == 1 and pusher_changes["results"][0]["id"] == "_user/pusher"

    # Add docs with the appropriate channels
    a_docs = client.add_docs(url=sg_url, db=sg_db, number=num_docs_per_channel, id_prefix=None, auth=pusher_session, channels=["A"])
    assert len(a_docs) == 100

    expected_docs = a_docs

    if is_multi_channel_grant:
        b_docs = client.add_docs(url=sg_url, db=sg_db, number=num_docs_per_channel, id_prefix=None, auth=pusher_session, channels=["B"])
        assert len(b_docs) == 100
        expected_docs += b_docs

        c_docs = client.add_docs(url=sg_url, db=sg_db, number=num_docs_per_channel, id_prefix=None, auth=pusher_session, channels=["C"])
        assert len(c_docs) == 100
        expected_docs += c_docs

    # Wait for all docs to show up on the changes feed before the access grant
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=expected_docs, auth=pusher_session)

    # Get changes for the grantee before the grant and assert that the only change is the user doc
    grantee_changes_before_grant = client.get_changes(url=sg_url, db=sg_db, since=0, auth=grantee_session)
    assert len(grantee_changes_before_grant["results"]) == 1
    assert grantee_changes_before_grant["results"][0]["id"] == "_user/grantee"

    if grant_type == "CHANNEL-REST":
        # Grant channel access to the role via REST
        client.update_role(url=sg_admin_url, db=sg_db, name=empty_role_name, channels=channels_to_grant)
    elif grant_type == "CHANNEL-SYNC":
        # Grant channel access to the role via the sync function
        access_doc = document.create_doc(doc_id="channel_grant_to_role")
        access_doc["roles"] = ["role:{}".format(empty_role_name)]
        access_doc["channels"] = channels_to_grant
        client.add_doc(url=sg_url, db=sg_db, doc=access_doc, auth=pusher_session, use_post=True)

    # Issue a changes request after the grant
    grantee_changes_post_grant = client.get_changes(
        url=sg_url, db=sg_db, since=grantee_changes_before_grant["last_seq"], auth=grantee_session, feed="longpoll")

    # The grantee should have all the docs now
    if is_multi_channel_grant:
        # Check that the grantee gets all of the docs for channels ["A", "B", "C"]
        assert len(grantee_changes_post_grant["results"]) == num_docs_per_channel * 3
    else:
        # Check that the grantee gets all of the docs for channel ["A"]
        assert len(grantee_changes_post_grant["results"]) == num_docs_per_channel

    # Disable this conditional if https://github.com/couchbase/sync_gateway/issues/2277 is fixed
    if mode == "di":
        # Issue one more changes request from the post-grant last_seq and make sure there are no other changes
        grantee_changes_post_post_grant = client.get_changes(
            url=sg_url, db=sg_db, since=grantee_changes_post_grant["last_seq"], auth=grantee_session, feed="normal")
        assert len(grantee_changes_post_post_grant["results"]) == 0
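# For the "*-SYNC" grant types above, the grant is just a document whose body the bucket's sync
# function interprets (role(doc.roles, ...) / access(doc.users, doc.channels)). Assuming the same
# document.create_doc helper used throughout this suite, a grant-doc builder might look like the
# sketch below; the exact properties the sync function reads depend on the config under test.
def build_role_channel_grant_doc(doc_id, role_name, channels):
    """Return a doc body granting 'channels' to 'role_name' via the sync function."""
    grant_doc = document.create_doc(doc_id=doc_id)
    grant_doc["roles"] = ["role:{}".format(role_name)]  # role names are prefixed with 'role:'
    grant_doc["channels"] = channels
    return grant_doc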
def test_remove_add_channels_to_doc(params_from_base_test_setup, sg_conf_name):

    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster = Cluster(cluster_config)
    cluster.reset(sg_conf)

    client = MobileRestClient()

    admin_user_info = userinfo.UserInfo("admin", "pass", channels=["A", "B"], roles=[])
    a_user_info = userinfo.UserInfo("a_user", "pass", channels=["A"], roles=[])

    admin_user_auth = client.create_user(
        url=sg_admin_url,
        db=sg_db,
        name=admin_user_info.name,
        password=admin_user_info.password,
        channels=admin_user_info.channels,
    )

    a_user_auth = client.create_user(
        url=sg_admin_url,
        db=sg_db,
        name=a_user_info.name,
        password=a_user_info.password,
        channels=a_user_info.channels
    )

    a_docs = client.add_docs(
        url=sg_url,
        db=sg_db,
        number=50,
        id_prefix="a_doc",
        auth=admin_user_auth,
        channels=admin_user_info.channels
    )

    # Build a dictionary of a_docs
    a_docs_id_rev = {doc["id"]: doc["rev"] for doc in a_docs}
    assert len(a_docs_id_rev) == 50

    # Wait for all docs to show up in changes
    client.verify_doc_id_in_changes(sg_url, sg_db, expected_doc_id="_user/a_user", auth=a_user_auth)
    client.verify_docs_in_changes(sg_url, sg_db, expected_docs=a_docs, auth=a_user_auth)

    # Get changes for 'a_user'
    a_user_changes = client.get_changes(url=sg_url, db=sg_db, since=0, auth=a_user_auth, feed="normal")

    # 'a_user' should get 50 'a_doc_*' docs and 1 '_user/a_user' doc
    assert len(a_user_changes["results"]) == 51

    ###########################
    # Remove Channels from doc
    ###########################

    # Copy a_docs_id_rev to a dictionary to scratch off values
    remove_docs_scratch_off = a_docs_id_rev.copy()
    assert len(remove_docs_scratch_off) == 50

    # Use the admin user to update the docs to remove 'A' from the channels property on the doc and add 'B'
    client.update_docs(url=sg_url, db=sg_db, docs=a_docs, number_updates=1, auth=admin_user_auth, channels=["B"])

    # Longpoll loop required due to the delay that changes take to propagate to the client
    changes_timeout = 10
    start = time.time()
    last_seq = a_user_changes["last_seq"]
    while True:

        # If it takes longer than 10 seconds, fail the test
        if time.time() - start > changes_timeout:
            raise keywords.exceptions.TimeoutException("Could not find all expected docs in changes feed")

        # We found everything, exit the loop!
        if remove_docs_scratch_off == {}:
            log_info("All expected docs found to be removed")
            break

        # Get changes for 'a_user' from last_seq
        a_user_changes = client.get_changes(url=sg_url, db=sg_db, since=last_seq, auth=a_user_auth, timeout=10)
        assert len(a_user_changes["results"]) > 0

        # Loop over the changes found and perform the following:
        # 1. Check that the doc is flagged with 'removed'
        # 2. Cross the doc off the 'remove_docs_scratch_off' dictionary
        for change in a_user_changes["results"]:
            assert change["removed"] == ["A"]
            assert change["changes"][0]["rev"].startswith("2-")
            # This will blow up if any change is not found in the dictionary
            del remove_docs_scratch_off[change["id"]]

        # Update last_seq
        last_seq = a_user_changes["last_seq"]

    # Issue a changes request from 'last_seq' and verify that the changes are up to date and return no results
    a_user_changes = client.get_changes(url=sg_url, db=sg_db, since=last_seq, auth=a_user_auth, feed="normal")
    assert len(a_user_changes["results"]) == 0

    #########################
    # Add Channels to doc
    #########################

    # Copy the a_docs_id_rev dictionary for scratch-off
    add_docs_scratch_off = a_docs_id_rev.copy()
    assert len(add_docs_scratch_off) == 50

    # Use the admin user to update the docs to add ['A'] back to the document channels
    client.update_docs(url=sg_url, db=sg_db, docs=a_docs, number_updates=1, auth=admin_user_auth, channels=["A"])

    # Longpoll loop required due to the delay that changes take to propagate to the client
    changes_timeout = 10
    start = time.time()
    last_seq = a_user_changes["last_seq"]
    while True:

        # If it takes longer than 10 seconds, fail the test
        if time.time() - start > changes_timeout:
            raise keywords.exceptions.TimeoutException("Could not find all expected docs in changes feed")

        # We found everything, exit the loop!
        if add_docs_scratch_off == {}:
            log_info("All expected docs found to be added")
            break

        # Get changes for 'a_user' from last_seq
        a_user_changes = client.get_changes(url=sg_url, db=sg_db, since=last_seq, auth=a_user_auth, timeout=10)
        assert len(a_user_changes["results"]) > 0

        # Loop over the changes found and perform the following:
        # 1. Check that the doc has a 3rd generation rev prefix
        # 2. Cross the doc off the 'add_docs_scratch_off' dictionary
        for change in a_user_changes["results"]:
            assert change["changes"][0]["rev"].startswith("3-")
            # This will blow up if any change is not found in the dictionary
            del add_docs_scratch_off[change["id"]]

        # Update last_seq
        last_seq = a_user_changes["last_seq"]

    # Issue a changes request from 'last_seq' and verify that the changes are up to date and return no results
    a_user_changes = client.get_changes(url=sg_url, db=sg_db, since=last_seq, auth=a_user_auth, feed="normal")
    assert len(a_user_changes["results"]) == 0
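# The remove and add sections above share one "scratch-off" polling loop: keep a collection of
# expected doc ids, poll _changes from the rolling last_seq, and cross off each id as it arrives,
# failing on timeout or on an unexpected id. A generic sketch of that loop, assuming the suite's
# keywords.exceptions module is importable; the helper and argument names are illustrative.
def scratch_off_changes(get_changes, expected_ids, start_seq, timeout_secs=10):
    """Poll 'get_changes(since=...)' until every id in 'expected_ids' has been seen.
    Raises TimeoutException on timeout and KeyError on an unexpected or duplicate doc id."""
    import time

    remaining = set(expected_ids)
    last_seq = start_seq
    start = time.time()
    while remaining:
        if time.time() - start > timeout_secs:
            raise keywords.exceptions.TimeoutException("Could not find all expected docs in changes feed")
        changes = get_changes(since=last_seq)
        for change in changes["results"]:
            remaining.remove(change["id"])  # KeyError on unexpected / duplicate id
        last_seq = changes["last_seq"]
    return last_seq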
def test_server_goes_down_rebuild_channels(params_from_base_test_setup):
    """
    1. Start with a two node couchbase server cluster
    2. Start adding docs
    3. Kill one of the server nodes and signal completion
    4. Stop adding docs
    5. Verify that the expected docs are present and in the changes feed.
    6. Start the server again and add it back into the cluster
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    cluster_helper = ClusterKeywords()

    sg_conf_name = "sync_gateway_default_functional_tests"
    sg_conf_path = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper.reset_cluster(cluster_config=cluster_config, sync_gateway_config=sg_conf_path)

    topology = cluster_helper.get_cluster_topology(cluster_config)

    admin_sg = topology["sync_gateways"][0]["admin"]
    sg_url = topology["sync_gateways"][0]["public"]
    couchbase_servers = topology["couchbase_servers"]

    cbs_one_url = couchbase_servers[0]
    cbs_two_url = couchbase_servers[1]

    log_info("Running: 'test_server_goes_down_rebuild_channels'")
    log_info("cluster_config: {}".format(cluster_config))
    log_info("admin_sg: {}".format(admin_sg))
    log_info("sg_url: {}".format(sg_url))
    log_info("cbs_one_url: {}".format(cbs_one_url))
    log_info("cbs_two_url: {}".format(cbs_two_url))

    sg_db = "db"
    num_docs = 100

    admin_user_info = userinfo.UserInfo(
        name="admin",
        password="******",
        channels=["ABC"],
        roles=[]
    )

    seth_user_info = userinfo.UserInfo(
        name="seth",
        password="******",
        channels=["ABC"],
        roles=[]
    )

    client = MobileRestClient()
    main_server = CouchbaseServer(cbs_one_url)
    flakey_server = CouchbaseServer(cbs_two_url)

    admin_auth = client.create_user(
        admin_sg,
        sg_db,
        admin_user_info.name,
        admin_user_info.password,
        channels=admin_user_info.channels
    )

    client.create_user(
        admin_sg,
        sg_db,
        seth_user_info.name,
        seth_user_info.password,
        channels=seth_user_info.channels
    )
    seth_session = client.create_session(admin_sg, sg_db, seth_user_info.name)

    # Allow any user docs to make it to changes
    initial_changes = client.get_changes(url=sg_url, db=sg_db, since=0, auth=seth_session)

    # Push docs from admin
    docs = client.add_docs(
        url=sg_url,
        db=sg_db,
        number=num_docs,
        id_prefix=None,
        channels=admin_user_info.channels,
        auth=admin_auth
    )
    assert len(docs) == num_docs

    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=docs, auth=seth_session)

    changes_before_failover = client.get_changes(url=sg_url, db=sg_db, since=initial_changes["last_seq"], auth=seth_session)
    assert len(changes_before_failover["results"]) == num_docs

    # Stop server via 'service stop'
    flakey_server.stop()

    start = time.time()
    while True:

        # Fail the test if all docs do not succeed before timeout
        if (time.time() - start) > 60:
            # Bring the server back up before failing the test
            flakey_server.start()
            main_server.recover(flakey_server)
            main_server.rebalance_in(couchbase_servers, flakey_server)
            raise keywords.exceptions.TimeoutError("Failed to rebuild changes")

        try:
            # Poll until failover happens (~30 seconds)
            client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=docs, auth=seth_session)
            # Changes requests succeeded, exit the loop
            break
        except requests.exceptions.HTTPError:
            # Changes will fail until failover of the down server happens. Wait and try again.
            log_info("/db/_changes failed due to server down. Retrying ...")
            time.sleep(1)

    # Verify no new changes
    changes = client.get_changes(
        url=sg_url,
        db=sg_db,
        since=changes_before_failover["last_seq"],
        auth=seth_session,
        feed="normal"
    )
    assert len(changes["results"]) == 0

    # Check that all changes are intact from the initial changes request
    changes = client.get_changes(url=sg_url, db=sg_db, since=initial_changes["last_seq"], auth=seth_session)
    assert len(changes["results"]) == num_docs

    # Test succeeded without timeout, bring the server back into the topology
    flakey_server.start()
    main_server.recover(flakey_server)
    main_server.rebalance_in(couchbase_servers, flakey_server)
def test_take_down_bring_up_sg_accel_validate_cbgt(params_from_base_test_setup, sg_conf):
    """
    Scenario 1

    Start with 3 sg_accels
    Take down 2 sg_accels (block until down -- poll port if needed)
    Doc adds with uuids (~30 sec for cbgt to reshard)
    polling loop: wait for all docs to come back over changes feed
    Call validate pindex with correct number of accels

    Scenario 2 (Continuation)

    When bringing up, you'd have to poll the cbgt_cfg until you get the expected number of nodes,
    then you could validate the pindex with 2 accels
    """

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_take_down_bring_up_sg_accel_validate_cbgt'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    cluster_util = ClusterKeywords()
    topology = cluster_util.get_cluster_topology(cluster_conf)

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"

    client = MobileRestClient()

    doc_pusher_user_info = userinfo.UserInfo("doc_pusher", "pass", channels=["A"], roles=[])
    doc_pusher_auth = client.create_user(
        url=sg_admin_url,
        db=sg_db,
        name=doc_pusher_user_info.name,
        password=doc_pusher_user_info.password,
        channels=doc_pusher_user_info.channels
    )

    log_info("Shutting down sg_accels: [{}, {}]".format(cluster.sg_accels[1], cluster.sg_accels[2]))
    # Shut down two accel nodes in parallel
    with concurrent.futures.ThreadPoolExecutor(max_workers=3) as ex:
        sg_accel_down_task_1 = ex.submit(cluster.sg_accels[1].stop)
        sg_accel_down_task_2 = ex.submit(cluster.sg_accels[2].stop)
        assert sg_accel_down_task_1.result() == 0
        assert sg_accel_down_task_2.result() == 0

    log_info("Finished taking nodes down!")

    # It should take ~30 seconds for cbgt to pick up the failed nodes and reshard the pindexes.
    # During this window, add 1000 docs and start a longpoll changes loop to see if those docs make
    # it to the changes feed. If the reshard is successful they will show up at some point after.
    # If not, the docs will fail to show up.
    doc_pusher_docs = client.add_docs(
        url=sg_url,
        db=sg_db,
        number=1000,
        id_prefix=None,
        auth=doc_pusher_auth,
        channels=doc_pusher_user_info.channels
    )
    assert len(doc_pusher_docs) == 1000
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=doc_pusher_docs, auth=doc_pusher_auth, polling_interval=5)

    # The pindexes should be resharded at this point since all of the changes have shown up
    assert cluster.validate_cbgt_pindex_distribution(num_running_sg_accels=1)

    log_info("Start sg_accels: [{}, {}]".format(cluster.sg_accels[1], cluster.sg_accels[2]))

    # Bring the two accel nodes back up one at a time
    status = cluster.sg_accels[1].start(sg_conf)
    assert status == 0

    # Poll on pindex reshard after bringing the second accel node back
    assert cluster.validate_cbgt_pindex_distribution_retry(num_running_sg_accels=2)

    status = cluster.sg_accels[2].start(sg_conf)
    assert status == 0

    # Poll on pindex reshard after bringing the third accel node back
    assert cluster.validate_cbgt_pindex_distribution_retry(num_running_sg_accels=3)
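# Taking several sg_accel nodes down at roughly the same time matters here because cbgt only
# starts resharding once it sees the nodes gone. The ThreadPool pattern above generalizes to any
# number of nodes; a small sketch follows, assuming each node object has a .stop() that returns 0
# on success, as in the calls above. The helper name is illustrative.
def stop_nodes_in_parallel(nodes):
    """Stop every node concurrently and assert each shutdown succeeded."""
    import concurrent.futures

    with concurrent.futures.ThreadPoolExecutor(max_workers=len(nodes)) as ex:
        stop_tasks = [ex.submit(node.stop) for node in nodes]
        for task in stop_tasks:
            # .result() re-raises any exception from the stop call
            assert task.result() == 0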
def test_longpoll_awaken_roles(params_from_base_test_setup, sg_conf_name):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    cluster_topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    sg_admin_url = cluster_topology["sync_gateways"][0]["admin"]
    sg_url = cluster_topology["sync_gateways"][0]["public"]

    log_info("sg_conf: {}".format(sg_conf))
    log_info("sg_admin_url: {}".format(sg_admin_url))
    log_info("sg_url: {}".format(sg_url))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin_role = "admin_role"
    admin_channel = "admin_channel"

    admin_user_info = userinfo.UserInfo(name="admin", password="******", channels=[], roles=[admin_role])
    adam_user_info = userinfo.UserInfo(name="adam", password="******", channels=[], roles=[])
    traun_user_info = userinfo.UserInfo(name="traun", password="******", channels=[], roles=[])
    andy_user_info = userinfo.UserInfo(name="andy", password="******", channels=[], roles=[])

    sg_db = "db"

    client = MobileRestClient()

    # Create a role on sync_gateway
    client.create_role(url=sg_admin_url, db=sg_db, name=admin_role, channels=[admin_channel])

    # Create an admin user assigned to the role, and three users with no channels or roles
    admin_auth = client.create_user(url=sg_admin_url, db=sg_db, name=admin_user_info.name, password=admin_user_info.password, roles=[admin_role])
    adam_auth = client.create_user(url=sg_admin_url, db=sg_db, name=adam_user_info.name, password=adam_user_info.password)
    traun_auth = client.create_user(url=sg_admin_url, db=sg_db, name=traun_user_info.name, password=traun_user_info.password)
    andy_auth = client.create_user(url=sg_admin_url, db=sg_db, name=andy_user_info.name, password=andy_user_info.password)

    ################################
    # changes feed wakes for role add
    ################################

    # Get starting sequence of docs, use the last seq to progress past any _user docs.
    adam_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=adam_auth)
    traun_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=traun_auth)
    andy_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=andy_auth)

    # Add a doc with the channel associated with the admin role
    admin_doc = client.add_docs(url=sg_url, db=sg_db, number=1, id_prefix="admin_doc", auth=admin_auth, channels=[admin_channel])
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=admin_doc, auth=admin_auth)

    with concurrent.futures.ProcessPoolExecutor() as ex:
        # Start changes feed for 3 users from latest last_seq
        adam_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=adam_changes["last_seq"], timeout=10, auth=adam_auth)
        traun_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=traun_changes["last_seq"], timeout=10, auth=traun_auth)
        andy_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=andy_changes["last_seq"], timeout=10, auth=andy_auth)

        # Wait for the changes feeds to notice there are no changes and enter wait. 2 seconds should be more than enough.
        time.sleep(2)

        # Make sure the changes futures are still running and have not exited due to any new changes;
        # the feeds should be caught up and waiting
        assert not adam_changes_task.done()
        assert not traun_changes_task.done()
        assert not andy_changes_task.done()

        adam_auth = client.update_user(url=sg_admin_url, db=sg_db, name=adam_user_info.name, password=adam_user_info.password, roles=[admin_role])
        traun_auth = client.update_user(url=sg_admin_url, db=sg_db, name=traun_user_info.name, password=traun_user_info.password, roles=[admin_role])
        andy_auth = client.update_user(url=sg_admin_url, db=sg_db, name=andy_user_info.name, password=andy_user_info.password, roles=[admin_role])

        adam_changes = adam_changes_task.result()
        assert 1 <= len(adam_changes["results"]) <= 2
        assert adam_changes["results"][0]["id"] == "admin_doc_0" or adam_changes["results"][0]["id"] == "_user/adam"

        traun_changes = traun_changes_task.result()
        assert 1 <= len(traun_changes["results"]) <= 2
        assert traun_changes["results"][0]["id"] == "admin_doc_0" or traun_changes["results"][0]["id"] == "_user/traun"

        andy_changes = andy_changes_task.result()
        assert 1 <= len(andy_changes["results"]) <= 2
        assert andy_changes["results"][0]["id"] == "admin_doc_0" or andy_changes["results"][0]["id"] == "_user/andy"

    # Check that the user docs all show up in the changes feed
    client.verify_doc_id_in_changes(url=sg_url, db=sg_db, expected_doc_id="_user/adam", auth=adam_auth)
    client.verify_doc_id_in_changes(url=sg_url, db=sg_db, expected_doc_id="_user/traun", auth=traun_auth)
    client.verify_doc_id_in_changes(url=sg_url, db=sg_db, expected_doc_id="_user/andy", auth=andy_auth)

    # Check that the admin doc made it to all the changes feeds
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=admin_doc, auth=adam_auth)
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=admin_doc, auth=traun_auth)
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=admin_doc, auth=andy_auth)

    # At this point, each user should have a changes feed that is caught up for the next section

    ###########################################
    # changes feed wakes for channel add to role
    ###########################################

    abc_channel = "ABC"
    abc_pusher_info = userinfo.UserInfo(name="abc_pusher", password="******", channels=[abc_channel], roles=[])

    abc_pusher_auth = client.create_user(url=sg_admin_url, db=sg_db, name=abc_pusher_info.name, password=abc_pusher_info.password, channels=abc_pusher_info.channels)

    # Add doc with ABC channel
    client.add_docs(url=sg_url, db=sg_db, number=1, id_prefix="abc_doc", auth=abc_pusher_auth, channels=[abc_channel])

    # Get latest last_seq for next test section
    adam_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=adam_auth)
    traun_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=traun_auth)
    andy_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=andy_auth)

    with concurrent.futures.ProcessPoolExecutor() as ex:
        # Start changes feed for 3 users from latest last_seq
        adam_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=adam_changes["last_seq"], timeout=10, auth=adam_auth)
        traun_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=traun_changes["last_seq"], timeout=10, auth=traun_auth)
        andy_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=andy_changes["last_seq"], timeout=10, auth=andy_auth)

        # Wait for the changes feeds to notice there are no changes and enter wait. 2 seconds should be more than enough.
        time.sleep(2)

        # Make sure the changes futures are still running and have not exited due to any new changes;
        # the feeds should be caught up and waiting
        assert not adam_changes_task.done()
        assert not traun_changes_task.done()
        assert not andy_changes_task.done()

        # Update the admin role to include the ABC channel.
        # Since adam, traun, and andy are assigned to that role, they should wake up and get the 'abc_doc_0' doc.
        client.update_role(url=sg_admin_url, db=sg_db, name=admin_role, channels=[admin_channel, abc_channel])

        adam_changes = adam_changes_task.result()
        assert len(adam_changes["results"]) == 1
        assert adam_changes["results"][0]["id"] == "abc_doc_0"

        traun_changes = traun_changes_task.result()
        assert len(traun_changes["results"]) == 1
        assert traun_changes["results"][0]["id"] == "abc_doc_0"

        andy_changes = andy_changes_task.result()
        assert len(andy_changes["results"]) == 1
        assert andy_changes["results"][0]["id"] == "abc_doc_0"
def test_rebalance_sanity(params_from_base_test_setup):

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    cluster_helper = ClusterKeywords()

    sg_conf_name = "sync_gateway_default_functional_tests"
    sg_conf_path = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper.reset_cluster(cluster_config=cluster_config, sync_gateway_config=sg_conf_path)

    topology = cluster_helper.get_cluster_topology(cluster_config)

    admin_sg_one = topology["sync_gateways"][0]["admin"]
    sg_one_url = topology["sync_gateways"][0]["public"]

    cluster_servers = topology["couchbase_servers"]
    cbs_one_url = cluster_servers[0]
    cbs_two_url = cluster_servers[1]

    log_info("Running: 'test_rebalance_sanity'")
    log_info("cluster_config: {}".format(cluster_config))
    log_info("admin_sg: {}".format(admin_sg_one))
    log_info("sg_url: {}".format(sg_one_url))
    log_info("cbs_one_url: {}".format(cbs_one_url))
    log_info("cbs_two_url: {}".format(cbs_two_url))

    sg_db = "db"
    num_docs = 100
    num_updates = 100
    sg_user_name = "seth"
    sg_user_password = "******"
    channels = ["ABC", "CBS"]

    client = MobileRestClient()
    cb_server = CouchbaseServer(cbs_one_url)
    server_to_remove = CouchbaseServer(cbs_two_url)

    client.create_user(admin_sg_one, sg_db, sg_user_name, sg_user_password, channels=channels)
    session = client.create_session(admin_sg_one, sg_db, sg_user_name)

    with concurrent.futures.ThreadPoolExecutor(5) as executor:
        # Add docs to sg
        log_info("Adding docs to sync_gateway")
        docs = client.add_docs(sg_one_url, sg_db, num_docs, "test_doc", channels=channels, auth=session)
        assert len(docs) == num_docs

        # Start updating docs and rebalance out one CBS node
        log_info("Updating docs on sync_gateway")
        update_docs_task = executor.submit(client.update_docs, sg_one_url, sg_db, docs, num_updates, auth=session)

        # Run the rebalance while the updates are in flight
        cb_server.rebalance_out(cluster_servers, server_to_remove)

        updated_docs = update_docs_task.result()
        log_info(updated_docs)

    # Verify docs / revisions present
    client.verify_docs_present(sg_one_url, sg_db, updated_docs, auth=session)

    # Verify doc revisions in the changes feed
    client.verify_docs_in_changes(sg_one_url, sg_db, updated_docs, auth=session)

    # Rebalance the server back into the pool
    cb_server.add_node(server_to_remove)
    cb_server.rebalance_in(cluster_servers, server_to_remove)
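# The rebalance test interleaves a long-running client workload with a cluster operation: the
# updates are submitted to an executor, the rebalance runs on the main thread, and the update
# future is only collected after the rebalance returns, so the two genuinely overlap. A sketch of
# that pattern under the same assumptions; the helper and argument names are illustrative.
def update_docs_during_rebalance(executor, update_docs, rebalance, *update_args, **update_kwargs):
    """Run 'update_docs' concurrently with 'rebalance'; return the updated docs."""
    update_task = executor.submit(update_docs, *update_args, **update_kwargs)
    rebalance()                  # runs while the updates are in flight
    return update_task.result()  # surfaces any exception from the update task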
def test_backfill_channels_oneshot_limit_changes(params_from_base_test_setup, sg_conf_name, grant_type):

    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"

    log_info("grant_type: {}".format(grant_type))

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster = Cluster(cluster_config)
    cluster.reset(sg_conf)

    client = MobileRestClient()

    admin_user_info = userinfo.UserInfo("admin", "pass", channels=["A"], roles=[])
    user_b_user_info = userinfo.UserInfo("USER_B", "pass", channels=["B"], roles=[])

    # Create users / sessions
    client.create_user(url=sg_admin_url, db=sg_db, name=admin_user_info.name, password=admin_user_info.password, channels=admin_user_info.channels)
    client.create_user(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, password=user_b_user_info.password, channels=user_b_user_info.channels)

    admin_session = client.create_session(url=sg_admin_url, db=sg_db, name=admin_user_info.name, password=admin_user_info.password)
    user_b_session = client.create_session(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, password=user_b_user_info.password)

    # Create 50 "A" channel docs
    a_docs = client.add_docs(url=sg_url, db=sg_db, number=50, id_prefix=None, auth=admin_session, channels=["A"])
    assert len(a_docs) == 50

    b_docs = client.add_docs(url=sg_url, db=sg_db, number=1, id_prefix="b_doc", auth=user_b_session, channels=["B"])
    assert len(b_docs) == 1

    user_doc = {"id": "_user/USER_B", "rev": None}
    b_docs.append(user_doc)

    # Loop until user_b sees the b_doc_0 doc and the _user/USER_B doc
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=b_docs, auth=user_b_session, strict=True)

    # Get last_seq for user_b
    user_b_changes = client.get_changes(url=sg_url, db=sg_db, since=0, auth=user_b_session, feed="normal")

    # Grant access to channel "A"
    if grant_type == "CHANNEL-REST":
        log_info("Granting user access to channel A via Admin REST user update")
        # Grant via update to the user in the Admin API
        client.update_user(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, channels=["A", "B"])
    elif grant_type == "CHANNEL-SYNC":
        log_info("Granting user access to channel A via sync function access()")
        # Grant via access() in the sync function; the id 'channel_access' will trigger an access(doc.users, doc.channels)
        access_doc = document.create_doc("channel_access", channels=["A"])
        access_doc["users"] = ["USER_B"]
        client.add_doc(url=sg_url, db=sg_db, doc=access_doc, auth=admin_session)
    elif grant_type == "ROLE-REST":
        log_info("Granting user access to channel A via Admin REST role grant")
        # Create role with channel A
        client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"])
        client.update_user(url=sg_admin_url, db=sg_db, name="USER_B", roles=["channel-A-role"])
    elif grant_type == "ROLE-SYNC":
        log_info("Granting user access to channel A via sync function role() grant")
        # Create role with channel A
        client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"])
        # Grant via role() in the sync function; the id 'role_access' will trigger a role(doc.users, doc.roles)
        role_access_doc = document.create_doc("role_access")
        role_access_doc["users"] = ["USER_B"]
        role_access_doc["roles"] = ["role:channel-A-role"]
        client.add_doc(sg_url, db=sg_db, doc=role_access_doc, auth=admin_session)
    else:
        pytest.fail("Unsupported grant_type!!!!")

    # Create a dictionary keyed on doc id for all of the channel A docs
    ids_and_revs_from_a_docs = {doc["id"]: doc["rev"] for doc in a_docs}
    assert len(ids_and_revs_from_a_docs.keys()) == 50

    log_info("Issuing 3 one-shot changes requests with limit and last_seq!")

    # Issue 3 one-shot changes requests with a limit of 20

    #################
    # Changes Req #1
    #################
    user_b_changes_after_grant_one = client.get_changes(
        url=sg_url, db=sg_db, since=user_b_changes["last_seq"], auth=user_b_session, feed="normal", limit=20)

    # User B should have received 20 docs due to the limit
    assert len(user_b_changes_after_grant_one["results"]) == 20

    for doc in user_b_changes_after_grant_one["results"]:
        # Cross off keys found in the 'a_docs' dictionary
        del ids_and_revs_from_a_docs[doc["id"]]

    #################
    # Changes Req #2
    #################
    user_b_changes_after_grant_two = client.get_changes(
        url=sg_url, db=sg_db, since=user_b_changes_after_grant_one["last_seq"], auth=user_b_session, feed="normal", limit=20)

    # User B should have received 20 docs due to the limit
    assert len(user_b_changes_after_grant_two["results"]) == 20

    for doc in user_b_changes_after_grant_two["results"]:
        # Cross off keys found in the 'a_docs' dictionary
        del ids_and_revs_from_a_docs[doc["id"]]

    #################
    # Changes Req #3
    #################
    user_b_changes_after_grant_three = client.get_changes(
        url=sg_url, db=sg_db, since=user_b_changes_after_grant_two["last_seq"], auth=user_b_session, feed="normal", limit=20)

    # User B should have received the remaining 10 docs
    assert len(user_b_changes_after_grant_three["results"]) == 10

    for doc in user_b_changes_after_grant_three["results"]:
        # Cross off keys found in the 'a_docs' dictionary
        del ids_and_revs_from_a_docs[doc["id"]]

    # Make sure all the docs have been crossed off
    assert len(ids_and_revs_from_a_docs) == 0

    #################
    # Changes Req #4
    #################
    user_b_changes_after_grant_four = client.get_changes(
        url=sg_url, db=sg_db, since=user_b_changes_after_grant_three["last_seq"], auth=user_b_session, feed="normal", limit=20)

    # Changes should be caught up and there should be no results
    assert len(user_b_changes_after_grant_four["results"]) == 0
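# The three numbered requests above are limit-based paging of the changes feed done by hand:
# follow last_seq from one response into the next request until a page comes back short. The same
# walk can be written as a loop; a hypothetical sketch is below, where 'get_changes' stands in for
# MobileRestClient.get_changes with feed="normal" and the helper is not part of this suite.
def page_changes(get_changes, start_seq, limit=20):
    """Yield each page of changes results, following last_seq, until a page
    returns fewer than 'limit' results."""
    since = start_seq
    while True:
        page = get_changes(since=since, limit=limit)
        yield page["results"]
        since = page["last_seq"]
        if len(page["results"]) < limit:
            return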
def test_server_goes_down_sanity(params_from_base_test_setup):
    """
    1. Start with a two node couchbase server cluster
    2. Start adding docs
    3. Kill one of the server nodes and signal completion
    4. Stop adding docs
    5. Verify that the expected docs are present and in the changes feed.
    6. Start the server again and add it back into the cluster
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    cluster_helper = ClusterKeywords()

    sg_conf_name = "sync_gateway_default_functional_tests"
    sg_conf_path = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper.reset_cluster(cluster_config=cluster_config, sync_gateway_config=sg_conf_path)

    topology = cluster_helper.get_cluster_topology(cluster_config)

    admin_sg = topology["sync_gateways"][0]["admin"]
    sg_url = topology["sync_gateways"][0]["public"]
    couchbase_servers = topology["couchbase_servers"]

    cbs_one_url = couchbase_servers[0]
    cbs_two_url = couchbase_servers[1]

    log_info("Running: 'test_server_goes_down_sanity'")
    log_info("cluster_config: {}".format(cluster_config))
    log_info("admin_sg: {}".format(admin_sg))
    log_info("sg_url: {}".format(sg_url))
    log_info("cbs_one_url: {}".format(cbs_one_url))
    log_info("cbs_two_url: {}".format(cbs_two_url))

    sg_db = "db"
    num_docs = 100
    sg_user_name = "seth"
    sg_user_password = "******"
    channels = ["ABC", "CBS"]

    client = MobileRestClient()
    main_server = CouchbaseServer(cbs_one_url)
    flakey_server = CouchbaseServer(cbs_two_url)

    client.create_user(admin_sg, sg_db, sg_user_name, sg_user_password, channels=channels)
    session = client.create_session(admin_sg, sg_db, sg_user_name)

    # Stop the second server
    flakey_server.stop()

    # Try to add 100 docs in a loop until all succeed; if they never do, fail with a timeout
    errors = num_docs

    # Wait 30 seconds for auto failover
    # (Minimum value suggested - http://docs.couchbase.com/admin/admin/Tasks/tasks-nodeFailover.html)
    # + 15 seconds to add docs
    timeout = 45
    start = time.time()

    successful_add = False
    while not successful_add:

        # Fail the test if all docs do not succeed before timeout
        if (time.time() - start) > timeout:
            # Bring the server back up before failing the test
            flakey_server.start()
            main_server.rebalance_in(couchbase_servers, flakey_server)
            raise keywords.exceptions.TimeoutError("Failed to successfully put docs before timeout")

        try:
            docs = client.add_docs(url=sg_url, db=sg_db, number=num_docs, id_prefix=None, auth=session, channels=channels)

            # If the above add doc does not throw, it was a successful add
            successful_add = True
        except requests.exceptions.HTTPError as he:
            log_info("Failed to add docs: {}".format(he))
            log_info("Seeing: {} errors".format(errors))
            time.sleep(1)

    assert len(docs) == 100
    client.verify_docs_present(url=sg_url, db=sg_db, expected_docs=docs, auth=session)

    try:
        client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=docs, auth=session, polling_interval=5)
    except keywords.exceptions.TimeoutException:
        # Timed out verifying docs. Bring the server back in to restore the topology, then fail.
        # Failing due to https://github.com/couchbase/sync_gateway/issues/2197
        flakey_server.start()
        main_server.recover(flakey_server)
        main_server.rebalance_in(couchbase_servers, flakey_server)
        raise keywords.exceptions.TimeoutException("Failed to get all changes")

    # Test succeeded without timeout, bring the server back into the topology
    flakey_server.start()
    main_server.recover(flakey_server)
    main_server.rebalance_in(couchbase_servers, flakey_server)

    log_info("test_server_goes_down_sanity complete!")
def test_backfill_channels_looping_longpoll_changes(params_from_base_test_setup, sg_conf_name, grant_type):

    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"

    log_info("grant_type: {}".format(grant_type))

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster = Cluster(cluster_config)
    cluster.reset(sg_conf)

    client = MobileRestClient()

    admin_user_info = userinfo.UserInfo("admin", "pass", channels=["A"], roles=[])
    user_b_user_info = userinfo.UserInfo("USER_B", "pass", channels=["B"], roles=[])

    # Create users / sessions
    client.create_user(url=sg_admin_url, db=sg_db, name=admin_user_info.name, password=admin_user_info.password, channels=admin_user_info.channels)
    client.create_user(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, password=user_b_user_info.password, channels=user_b_user_info.channels)

    admin_session = client.create_session(url=sg_admin_url, db=sg_db, name=admin_user_info.name, password=admin_user_info.password)
    user_b_session = client.create_session(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, password=user_b_user_info.password)

    # Create 50 "A" channel docs
    a_docs = client.add_docs(url=sg_url, db=sg_db, number=50, id_prefix=None, auth=admin_session, channels=["A"])
    assert len(a_docs) == 50

    b_docs = client.add_docs(url=sg_url, db=sg_db, number=1, id_prefix="b_doc", auth=user_b_session, channels=["B"])
    assert len(b_docs) == 1

    user_doc = {"id": "_user/USER_B", "rev": None}
    b_docs.append(user_doc)

    # Loop until user_b sees the b_doc_0 doc and the _user/USER_B doc
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=b_docs, auth=user_b_session, strict=True)

    # Create a dictionary keyed on doc id for all of the channel A docs
    ids_and_revs_from_a_docs = {doc["id"]: doc["rev"] for doc in a_docs}
    assert len(ids_and_revs_from_a_docs.keys()) == 50

    # Get last_seq for user_b
    user_b_changes = client.get_changes(url=sg_url, db=sg_db, since=0, auth=user_b_session, feed="normal")

    with concurrent.futures.ProcessPoolExecutor() as ex:

        # Start the longpoll changes feed
        changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=user_b_changes["last_seq"], auth=user_b_session, timeout=10, limit=20)

        # Grant access to channel "A"
        if grant_type == "CHANNEL-REST":
            log_info("Granting user access to channel A via Admin REST user update")
            # Grant via update to the user in the Admin API
            client.update_user(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, channels=["A", "B"])
        elif grant_type == "CHANNEL-SYNC":
            log_info("Granting user access to channel A via sync function access()")
            # Grant via access() in the sync function; the id 'channel_access' will trigger an access(doc.users, doc.channels)
            access_doc = document.create_doc("channel_access", channels=["A"])
            access_doc["users"] = ["USER_B"]
            client.add_doc(url=sg_url, db=sg_db, doc=access_doc, auth=admin_session)
        elif grant_type == "ROLE-REST":
            log_info("Granting user access to channel A via Admin REST role grant")
            # Create role with channel A
            client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"])
            client.update_user(url=sg_admin_url, db=sg_db, name="USER_B", roles=["channel-A-role"])
        elif grant_type == "ROLE-SYNC":
            log_info("Granting user access to channel A via sync function role() grant")
            # Create role with channel A
            client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"])
            # Grant via role() in the sync function; the id 'role_access' will trigger a role(doc.users, doc.roles)
            role_access_doc = document.create_doc("role_access")
            role_access_doc["users"] = ["USER_B"]
            role_access_doc["roles"] = ["role:channel-A-role"]
            client.add_doc(sg_url, db=sg_db, doc=role_access_doc, auth=admin_session)
        else:
            pytest.fail("Unsupported grant_type!!!!")

        # Block on return of longpoll changes; the feed should wake up and return 20 results
        changes = changes_task.result()

    assert len(changes["results"]) == 20
    num_requests = 1

    # Cross the results off from the 'ids_and_revs_from_a_docs' dictionary
    for doc in changes["results"]:
        del ids_and_revs_from_a_docs[doc["id"]]

    # Start looping longpoll changes with limit, crossing changes off the dictionary each time one is found.
    # Since 20 changes should be crossed off already, this should execute twice.
    log_info("Starting looping longpoll changes with limit!")
    last_seq = changes["last_seq"]
    while True:

        if len(ids_and_revs_from_a_docs.keys()) == 0:
            log_info("All docs were found! Exiting polling loop")
            break

        changes = client.get_changes(url=sg_url, db=sg_db, since=last_seq, auth=user_b_session, limit=20, timeout=10)
        num_requests += 1

        # If more than 3 requests are needed to get all the changes, raise an exception
        if num_requests == 2:
            assert len(changes["results"]) == 20
        elif num_requests == 3:
            # This will be 10 or 11 depending on whether the _user/ doc is returned
            assert 10 <= len(changes["results"]) <= 11
        else:
            raise exceptions.ChangesError("Looping longpoll should only have to perform 3 requests to get all the changes!!")

        # Cross the results off from the 'ids_and_revs_from_a_docs' dictionary.
        # This will blow up if duplicate docs are sent to changes.
        for doc in changes["results"]:
            if doc["id"] != "_user/USER_B":
                del ids_and_revs_from_a_docs[doc["id"]]

        last_seq = changes["last_seq"]

    # Changes after the longpoll loop
    zero_results = client.get_changes(url=sg_url, db=sg_db, since=last_seq, auth=user_b_session, feed="normal")

    # Changes should be caught up and there should be no results
    assert len(zero_results["results"]) == 0
def test_view_backfill_for_deletes(params_from_base_test_setup, sg_conf_name, validate_changes_before_restart):
    """
    Scenario:
    1. Write a bunch of docs
    2. Delete 1/2
    3. Restart Sync Gateway
    4. Issue _changes, assert view backfills docs and delete notifications
    """

    num_docs = 1000
    sg_db = 'db'

    cluster_conf = params_from_base_test_setup['cluster_config']
    cluster_topology = params_from_base_test_setup['cluster_topology']
    mode = params_from_base_test_setup['mode']

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    sg_admin_url = cluster_topology['sync_gateways'][0]['admin']
    sg_url = cluster_topology['sync_gateways'][0]['public']
    cbs_url = cluster_topology['couchbase_servers'][0]

    log_info('sg_conf: {}'.format(sg_conf))
    log_info('sg_admin_url: {}'.format(sg_admin_url))
    log_info('sg_url: {}'.format(sg_url))
    log_info('cbs_url: {}'.format(cbs_url))
    log_info('validate_changes_before_restart: {}'.format(validate_changes_before_restart))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # Create clients
    sg_client = MobileRestClient()

    # Create user / session
    seth_user_info = UserInfo(name='seth', password='******', channels=['NASA', 'NATGEO'], roles=[])
    sg_client.create_user(url=sg_admin_url, db=sg_db, name=seth_user_info.name, password=seth_user_info.password, channels=seth_user_info.channels)

    seth_auth = sg_client.create_session(url=sg_admin_url, db=sg_db, name=seth_user_info.name, password=seth_user_info.password)

    # Add 'num_docs' to Sync Gateway
    doc_bodies = document.create_docs('test_doc', number=num_docs, channels=seth_user_info.channels)
    bulk_resp = sg_client.add_bulk_docs(url=sg_url, db=sg_db, docs=doc_bodies, auth=seth_auth)
    assert len(bulk_resp) == num_docs

    # Delete half of the docs randomly
    deleted_docs = []
    for _ in range(num_docs // 2):
        random_doc = random.choice(bulk_resp)
        deleted_doc = sg_client.delete_doc(url=sg_url, db=sg_db, doc_id=random_doc['id'], rev=random_doc['rev'], auth=seth_auth)
        deleted_docs.append(deleted_doc)
        bulk_resp.remove(random_doc)

    log_info('Number of docs deleted: {}'.format(len(deleted_docs)))
    all_docs = bulk_resp + deleted_docs
    log_info('Number of docs to look for in changes: {}'.format(len(all_docs)))

    # This test will check changes before and after the SG restart if
    # validate_changes_before_restart == True.
    # If it is not set to True, only validate the changes after the restart.
    if validate_changes_before_restart:
        # Verify deletions and initial docs show up in the changes feed
        sg_client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=all_docs, auth=seth_auth)
        changes = sg_client.get_changes(url=sg_url, db=sg_db, since=0, auth=seth_auth)
        # All docs should show up + the _user doc
        assert len(changes['results']) == num_docs + 1

        deleted_doc_ids = [doc['id'] for doc in deleted_docs]
        assert len(deleted_doc_ids) == num_docs // 2
        deleted_docs_in_changes = [
            change['id'] for change in changes['results']
            if 'deleted' in change and change['deleted']
        ]
        assert len(deleted_docs_in_changes) == num_docs // 2

        # All deleted docs should show up in the changes feed
        for doc_id in deleted_docs_in_changes:
            assert doc_id in deleted_doc_ids
            deleted_doc_ids.remove(doc_id)
        assert len(deleted_doc_ids) == 0

    # Restart Sync Gateway
    sg_controller = SyncGateway()
    sg_controller.stop_sync_gateways(url=sg_url, cluster_config=cluster_conf)
    sg_controller.start_sync_gateways(url=sg_url, cluster_config=cluster_conf, config=sg_conf)

    # Verify deletions and initial docs show up in the changes feed
    sg_client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=all_docs, auth=seth_auth)
    changes = sg_client.get_changes(url=sg_url, db=sg_db, since=0, auth=seth_auth)
    # All docs should show up + the _user doc
    assert len(changes['results']) == num_docs + 1

    deleted_doc_ids = [doc['id'] for doc in deleted_docs]
    assert len(deleted_doc_ids) == num_docs // 2
    deleted_docs_in_changes = [
        change['id'] for change in changes['results']
        if 'deleted' in change and change['deleted']
    ]
    assert len(deleted_docs_in_changes) == num_docs // 2

    # All deleted docs should show up in the changes feed
    for doc_id in deleted_docs_in_changes:
        assert doc_id in deleted_doc_ids
        deleted_doc_ids.remove(doc_id)
    assert len(deleted_doc_ids) == 0
def test_initial_pull_replication(setup_client_syncgateway_test, continuous):
    """
    1. Prepare sync-gateway to have 10000 documents.
    2. Create a single shot / continuous pull replicator to pull the docs into a database.
    3. Verify that all of the docs get pulled.
    Referenced issue: couchbase/couchbase-lite-android#955.
    """

    sg_db = "db"
    ls_db = "ls_db"

    num_docs = 10000

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_one_admin = setup_client_syncgateway_test["sg_admin_url"]
    sg_one_public = setup_client_syncgateway_test["sg_url"]

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_one_public,
        config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_initial_pull_replication', continuous: {}".format(continuous))
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_one_admin: {}".format(sg_one_admin))
    log_info("sg_one_public: {}".format(sg_one_public))

    client = MobileRestClient()
    client.create_user(sg_one_admin, sg_db, "seth", password="******", channels=["ABC", "NBC"])
    session = client.create_session(sg_one_admin, sg_db, "seth")

    # Create 'num_docs' docs on sync_gateway
    docs = client.add_docs(
        url=sg_one_public,
        db=sg_db,
        number=num_docs,
        id_prefix="seeded_doc",
        generator="four_k",
        auth=session
    )
    assert len(docs) == num_docs

    client.create_database(url=ls_url, name=ls_db)

    # Start the one-shot or continuous pull replication
    repl_id = client.start_replication(
        url=ls_url,
        continuous=continuous,
        from_url=sg_one_admin,
        from_db=sg_db,
        to_db=ls_db
    )

    start = time.time()

    if continuous:
        log_info("Waiting for replication status 'Idle' for: {}".format(repl_id))
        # Android will report IDLE status, and drop into the 'verify_docs_present' below
        # due to https://github.com/couchbase/couchbase-lite-java-core/issues/1409
        client.wait_for_replication_status_idle(ls_url, repl_id)
    else:
        log_info("Waiting for no replications: {}".format(repl_id))
        client.wait_for_no_replications(ls_url)

    # Verify docs replicated to the client
    client.verify_docs_present(url=ls_url, db=ls_db, expected_docs=docs, timeout=240)

    all_docs_replicated_time = time.time() - start
    log_info("Replication took: {}s".format(all_docs_replicated_time))

    # Verify docs show up in the client's changes feed
    client.verify_docs_in_changes(url=ls_url, db=ls_db, expected_docs=docs)

    replications = client.get_replications(url=ls_url)

    if continuous:
        assert len(replications) == 1, "There should only be one replication running"
        assert replications[0]["status"] == "Idle", "Replication Status should be 'Idle'"
        assert replications[0]["continuous"], "Running replication should be continuous"
        # Only .NET has an 'error' property
        if "error" in replications[0]:
            assert len(replications[0]["error"]) == 0
    else:
        assert len(replications) == 0, "No replications should be running"
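# For pull replications the "done" signal differs by mode: a one-shot replication disappears from
# the active replications, while a continuous one settles into an 'Idle' state and stays
# registered. A sketch of that branch with timing, reusing the same client calls as the test
# above; the wrapper itself is illustrative and not part of this suite.
def wait_for_pull_complete(client, ls_url, repl_id, continuous):
    """Block until the replication has drained, returning the elapsed seconds."""
    import time

    start = time.time()
    if continuous:
        # Continuous replications never finish; 'Idle' means caught up
        client.wait_for_replication_status_idle(ls_url, repl_id)
    else:
        # One-shot replications are removed once they complete
        client.wait_for_no_replications(ls_url)
    return time.time() - start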
def test_backfill_channels_oneshot_limit_changes(params_from_base_test_setup, sg_conf_name, grant_type):
    """
    Test that docs are backfilled for a one-shot changes request with a limit after an access grant (via REST or SYNC)

    CHANNEL-REST = Channel is granted to user via REST
    CHANNEL-SYNC = Channel is granted to user via sync function access()
    ROLE-REST = Role is granted to user via REST
    ROLE-SYNC = Role is granted to user via sync function role()
    CHANNEL-TO-ROLE-REST = Channel is added to existing role via REST
    CHANNEL-TO-ROLE-SYNC = Channel is added to existing role via sync access()
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"

    log_info("grant_type: {}".format(grant_type))

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster = Cluster(cluster_config)
    cluster.reset(sg_conf)

    client = MobileRestClient()

    admin_user_info = userinfo.UserInfo("admin", "pass", channels=["A"], roles=[])

    if grant_type == "CHANNEL-TO-ROLE-REST" or grant_type == "CHANNEL-TO-ROLE-SYNC":
        client.create_role(url=sg_admin_url, db=sg_db, name="empty_role", channels=[])
        user_b_user_info = userinfo.UserInfo("USER_B", "pass", channels=["B"], roles=["empty_role"])
    else:
        user_b_user_info = userinfo.UserInfo("USER_B", "pass", channels=["B"], roles=[])

    # Create users / sessions
    client.create_user(url=sg_admin_url, db=sg_db,
                       name=admin_user_info.name, password=admin_user_info.password, channels=admin_user_info.channels)
    client.create_user(url=sg_admin_url, db=sg_db,
                       name=user_b_user_info.name, password=user_b_user_info.password, channels=user_b_user_info.channels, roles=user_b_user_info.roles)

    admin_session = client.create_session(url=sg_admin_url, db=sg_db, name=admin_user_info.name, password=admin_user_info.password)
    user_b_session = client.create_session(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, password=user_b_user_info.password)

    # Create 50 "A" channel docs
    a_docs = client.add_docs(url=sg_url, db=sg_db, number=50, id_prefix=None, auth=admin_session, channels=["A"])
    assert len(a_docs) == 50

    b_docs = client.add_docs(url=sg_url, db=sg_db, number=1, id_prefix="b_doc", auth=user_b_session, channels=["B"])
    assert len(b_docs) == 1

    # Loop until admin user sees docs in changes
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=a_docs, auth=admin_session)

    user_doc = {"id": "_user/USER_B", "rev": None}
    b_docs.append(user_doc)

    # Loop until user_b sees b_doc_0 doc and _user/USER_B doc
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=b_docs, auth=user_b_session, strict=True)

    # Get last_seq for user_b
    user_b_changes = client.get_changes(url=sg_url, db=sg_db, since=0, auth=user_b_session, feed="normal")

    # Grant access to channel "A"
    if grant_type == "CHANNEL-REST":
        log_info("Granting user access to channel A via Admin REST user update")
        # Grant via update to user in Admin API
        client.update_user(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, channels=["A", "B"])
    elif grant_type == "CHANNEL-SYNC":
        log_info("Granting user access to channel A via sync function access()")
        # Grant via access() in the sync function; the doc id 'channel_access' triggers access(doc.users, doc.channels)
        access_doc = document.create_doc("channel_access", channels=["A"])
        access_doc["users"] = ["USER_B"]
        client.add_doc(url=sg_url, db=sg_db, doc=access_doc, auth=admin_session)
    elif grant_type == "ROLE-REST":
        log_info("Granting user access to channel A via Admin REST role grant")
        # Create role with channel A
        client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"])
        client.update_user(url=sg_admin_url, db=sg_db, name="USER_B", roles=["channel-A-role"])
    elif grant_type == "ROLE-SYNC":
        log_info("Granting user access to channel A via sync function role() grant")
        # Create role with channel A
        client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"])
        # Grant via role() in the sync function; the doc id 'role_access' triggers role(doc.users, doc.roles)
        role_access_doc = document.create_doc("role_access")
        role_access_doc["users"] = ["USER_B"]
        role_access_doc["roles"] = ["role:channel-A-role"]
        client.add_doc(sg_url, db=sg_db, doc=role_access_doc, auth=admin_session)
    elif grant_type == "CHANNEL-TO-ROLE-REST":
        # Update the empty_role to have channel "A"
        client.update_role(url=sg_admin_url, db=sg_db, name="empty_role", channels=["A"])
    elif grant_type == "CHANNEL-TO-ROLE-SYNC":
        # Grant empty_role access to channel "A" via the sync function
        access_doc = document.create_doc("channel_grant_to_role")
        access_doc["roles"] = ["role:empty_role"]
        access_doc["channels"] = ["A"]
        client.add_doc(url=sg_url, db=sg_db, doc=access_doc, auth=admin_session, use_post=True)
    else:
        pytest.fail("Unsupported grant_type!!!!")

    # Create a dictionary keyed on doc id for all of the channel A docs
    ids_and_revs_from_a_docs = {doc["id"]: doc["rev"] for doc in a_docs}
    assert len(ids_and_revs_from_a_docs) == 50

    log_info("Doing 3 one-shot changes requests with limit and last_seq!")

    # Issue 3 one-shot changes requests with a limit of 20.
    # The first request makes sure the access grant was successful; the change may not propagate immediately, so retry.
    num_retries = 3
    count = 0
    while True:
        if count == num_retries:
            raise exceptions.ChangesError("Didn't get all expected changes before timing out!")

        user_b_changes_after_grant_one = client.get_changes(
            url=sg_url, db=sg_db, since=user_b_changes["last_seq"], auth=user_b_session, feed="normal", limit=20)

        if len(user_b_changes_after_grant_one["results"]) > 0:
            # Found changes, break out and validate changes!
            break

        time.sleep(1)
        count += 1

    #################
    # Changes Req #1
    #################

    # Expect a user doc in the changes
    if grant_type == "CHANNEL-REST" or grant_type == "ROLE-REST":
        ids_and_revs_from_a_docs["_user/USER_B"] = None

    # User B should have received 20 docs due to the limit
    assert len(user_b_changes_after_grant_one["results"]) == 20

    for doc in user_b_changes_after_grant_one["results"]:
        # Cross off keys found in the 'a_docs' dictionary
        del ids_and_revs_from_a_docs[doc["id"]]

    #################
    # Changes Req #2
    #################
    user_b_changes_after_grant_two = client.get_changes(
        url=sg_url, db=sg_db, since=user_b_changes_after_grant_one["last_seq"], auth=user_b_session, feed="normal", limit=20)

    # User B should have received 20 docs due to the limit
    assert len(user_b_changes_after_grant_two["results"]) == 20

    for doc in user_b_changes_after_grant_two["results"]:
        # Cross off keys found in the 'a_docs' dictionary
        del ids_and_revs_from_a_docs[doc["id"]]

    #################
    # Changes Req #3
    #################
    user_b_changes_after_grant_three = client.get_changes(
        url=sg_url, db=sg_db, since=user_b_changes_after_grant_two["last_seq"], auth=user_b_session, feed="normal", limit=20)

    # User B should have received the 10 remaining docs, or 11 docs with a terminating _user doc.
    # The terminating user doc only happens with a grant via REST.
    if grant_type == "CHANNEL-REST" or grant_type == "ROLE-REST":
        assert len(user_b_changes_after_grant_three["results"]) == 11
    else:
        assert len(user_b_changes_after_grant_three["results"]) == 10

    for doc in user_b_changes_after_grant_three["results"]:
        # Cross off non-user-doc keys found in the 'a_docs' dictionary
        del ids_and_revs_from_a_docs[doc["id"]]

    # Make sure all of the docs have been crossed off
    assert len(ids_and_revs_from_a_docs) == 0

    #################
    # Changes Req #4
    #################
    user_b_changes_after_grant_four = client.get_changes(
        url=sg_url, db=sg_db, since=user_b_changes_after_grant_three["last_seq"], auth=user_b_session, feed="normal", limit=20)

    # Changes should be caught up and there should be no results
    assert len(user_b_changes_after_grant_four["results"]) == 0
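# The three limited changes requests above are effectively paginating the
# backfill. Below is a minimal sketch of that since/limit pattern with plain
# requests against Sync Gateway's documented GET /{db}/_changes endpoint; the
# (user, password) basic-auth tuple is an illustrative stand-in for the
# session auth used in the test.
import requests


def page_through_changes_sketch(sg_url, sg_db, auth, page_size=20):
    """Yield pages of a user's changes feed until a page comes back empty."""
    since = 0
    while True:
        resp = requests.get(
            "{}/{}/_changes".format(sg_url, sg_db),
            params={"feed": "normal", "since": since, "limit": page_size},
            auth=auth)
        resp.raise_for_status()
        body = resp.json()
        if not body["results"]:
            break
        yield body["results"]
        since = body["last_seq"]  # resume from where the last page ended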
def test_take_down_bring_up_sg_accel_validate_cbgt(params_from_base_test_setup, sg_conf):
    """
    Scenario 1

    Start with 3 sg_accels
    Take down 2 sg_accels (block until down -- poll port if needed)
    Doc adds with uuids (~30 sec for cbgt to reshard)
    polling loop: wait for all docs to come back over changes feed
    Call validate pindex with correct number of accels

    Scenario 2 (Continuation)

    When bringing up, you'd have to poll the cbgt_cfg until you get the expected number of nodes,
    then you could validate the pindex with 2 accels
    """

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_take_down_bring_up_sg_accel_validate_cbgt'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    cluster_util = ClusterKeywords()
    topology = cluster_util.get_cluster_topology(cluster_conf)

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"

    client = MobileRestClient()

    doc_pusher_user_info = userinfo.UserInfo("doc_pusher", "pass", channels=["A"], roles=[])
    doc_pusher_auth = client.create_user(
        url=sg_admin_url,
        db=sg_db,
        name=doc_pusher_user_info.name,
        password=doc_pusher_user_info.password,
        channels=doc_pusher_user_info.channels)

    log_info("Shutting down sg_accels: [{}, {}]".format(cluster.sg_accels[1], cluster.sg_accels[2]))

    # Shut down two accel nodes in parallel
    with concurrent.futures.ThreadPoolExecutor(max_workers=3) as ex:
        sg_accel_down_task_1 = ex.submit(cluster.sg_accels[1].stop)
        sg_accel_down_task_2 = ex.submit(cluster.sg_accels[2].stop)
        assert sg_accel_down_task_1.result() == 0
        assert sg_accel_down_task_2.result() == 0

    log_info("Finished taking nodes down!")

    # It should take ~30 seconds for cbgt to pick up the failed nodes and reshard the pindexes.
    # During this window, add 1000 docs and start a longpoll changes loop to see if those docs make it
    # to the changes feed. If the reshard is successful, they will show up at some point after.
    # If not, the docs will fail to show up.
    doc_pusher_docs = client.add_docs(
        url=sg_url,
        db=sg_db,
        number=1000,
        id_prefix=None,
        auth=doc_pusher_auth,
        channels=doc_pusher_user_info.channels)
    assert len(doc_pusher_docs) == 1000
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=doc_pusher_docs, auth=doc_pusher_auth, polling_interval=5)

    # The pindexes should be resharded at this point since all of the changes have shown up
    assert cluster.validate_cbgt_pindex_distribution(num_running_sg_accels=1)

    log_info("Start sg_accels: [{}, {}]".format(cluster.sg_accels[1], cluster.sg_accels[2]))

    # Bring the accel nodes back one at a time, validating the pindex distribution after each
    status = cluster.sg_accels[1].start(sg_conf)
    assert status == 0

    # Poll on pindex reshard after bringing the second accel node back
    assert cluster.validate_cbgt_pindex_distribution_retry(num_running_sg_accels=2)

    status = cluster.sg_accels[2].start(sg_conf)
    assert status == 0

    # Poll on pindex reshard after bringing the third accel node back
    assert cluster.validate_cbgt_pindex_distribution_retry(num_running_sg_accels=3)
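# validate_cbgt_pindex_distribution_retry above polls until the pindexes
# settle on the expected node count. Below is a minimal sketch of that kind of
# poll-until-true helper; the predicate and timing values are illustrative,
# not the testkit's actual implementation.
import time


def poll_until_true_sketch(predicate, timeout_sec=120, interval_sec=5):
    """Call predicate() until it returns True or the timeout expires."""
    deadline = time.time() + timeout_sec
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval_sec)
    return False


# Usage, assuming a cluster object like the one in the test above:
# assert poll_until_true_sketch(
#     lambda: cluster.validate_cbgt_pindex_distribution(num_running_sg_accels=3))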
def test_backfill_channels_oneshot_changes(params_from_base_test_setup, sg_conf_name, grant_type):
    """
    Test that docs are backfilled for a one-shot changes request after an access grant (via REST or SYNC)

    CHANNEL-REST = Channel is granted to user via REST
    CHANNEL-SYNC = Channel is granted to user via sync function access()
    ROLE-REST = Role is granted to user via REST
    ROLE-SYNC = Role is granted to user via sync function role()
    CHANNEL-TO-ROLE-REST = Channel is added to existing role via REST
    CHANNEL-TO-ROLE-SYNC = Channel is added to existing role via sync access()
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"

    log_info("grant_type: {}".format(grant_type))

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster = Cluster(cluster_config)
    cluster.reset(sg_conf)

    client = MobileRestClient()

    admin_user_info = userinfo.UserInfo("admin", "pass", channels=["A"], roles=[])

    if grant_type == "CHANNEL-TO-ROLE-REST" or grant_type == "CHANNEL-TO-ROLE-SYNC":
        client.create_role(url=sg_admin_url, db=sg_db, name="empty_role", channels=[])
        user_b_user_info = userinfo.UserInfo("USER_B", "pass", channels=["B"], roles=["empty_role"])
    else:
        user_b_user_info = userinfo.UserInfo("USER_B", "pass", channels=["B"], roles=[])

    # Create users / sessions
    client.create_user(url=sg_admin_url, db=sg_db,
                       name=admin_user_info.name, password=admin_user_info.password, channels=admin_user_info.channels)
    client.create_user(url=sg_admin_url, db=sg_db,
                       name=user_b_user_info.name, password=user_b_user_info.password, channels=user_b_user_info.channels, roles=user_b_user_info.roles)

    admin_session = client.create_session(url=sg_admin_url, db=sg_db, name=admin_user_info.name, password=admin_user_info.password)
    user_b_session = client.create_session(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, password=user_b_user_info.password)

    # Create 50 "A" channel docs
    a_docs = client.add_docs(url=sg_url, db=sg_db, number=50, id_prefix=None, auth=admin_session, channels=["A"])
    assert len(a_docs) == 50

    b_docs = client.add_docs(url=sg_url, db=sg_db, number=1, id_prefix="b_doc", auth=user_b_session, channels=["B"])
    assert len(b_docs) == 1

    user_doc = {"id": "_user/USER_B", "rev": None}
    b_docs.append(user_doc)

    # Loop until admin user sees docs in changes
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=a_docs, auth=admin_session)

    # Loop until user_b sees b_doc_0 doc and _user/USER_B doc
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=b_docs, auth=user_b_session, strict=True)

    # Get last_seq for user_b
    user_b_changes = client.get_changes(url=sg_url, db=sg_db, since=0, auth=user_b_session, feed="normal")

    # Grant access to channel "A"
    if grant_type == "CHANNEL-REST":
        log_info("Granting user access to channel A via Admin REST user update")
        # Grant via update to user in Admin API
        client.update_user(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, channels=["A", "B"])
    elif grant_type == "CHANNEL-SYNC":
        log_info("Granting user access to channel A via sync function access()")
        # Grant via access() in the sync function; the doc id 'channel_access' triggers access(doc.users, doc.channels)
        access_doc = document.create_doc("channel_access", channels=["A"])
        access_doc["users"] = ["USER_B"]
        client.add_doc(url=sg_url, db=sg_db, doc=access_doc, auth=admin_session)
    elif grant_type == "ROLE-REST":
        log_info("Granting user access to channel A via Admin REST role grant")
        # Create role with channel A
        client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"])
        client.update_user(url=sg_admin_url, db=sg_db, name="USER_B", channels=["B"], roles=["channel-A-role"])
    elif grant_type == "ROLE-SYNC":
        log_info("Granting user access to channel A via sync function role() grant")
        # Create role with channel A
        client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"])
        # Grant via role() in the sync function; the doc id 'role_access' triggers role(doc.users, doc.roles)
        role_access_doc = document.create_doc("role_access")
        role_access_doc["users"] = ["USER_B"]
        role_access_doc["roles"] = ["role:channel-A-role"]
        client.add_doc(sg_url, db=sg_db, doc=role_access_doc, auth=admin_session)
    elif grant_type == "CHANNEL-TO-ROLE-REST":
        # Update the empty_role to have channel "A"
        client.update_role(url=sg_admin_url, db=sg_db, name="empty_role", channels=["A"])
    elif grant_type == "CHANNEL-TO-ROLE-SYNC":
        # Grant empty_role access to channel "A" via the sync function
        access_doc = document.create_doc("channel_grant_to_role")
        access_doc["roles"] = ["role:empty_role"]
        access_doc["channels"] = ["A"]
        client.add_doc(url=sg_url, db=sg_db, doc=access_doc, auth=admin_session, use_post=True)
    else:
        pytest.fail("Unsupported grant_type!!!!")

    # Issue a one-shot changes request to make sure the access grant was successful.
    # The change may not propagate immediately, so retry.
    num_retries = 3
    count = 0
    while True:
        if count == num_retries:
            raise exceptions.ChangesError("Didn't get all expected changes before timing out!")

        user_b_changes_after_grant = client.get_changes(
            url=sg_url, db=sg_db, since=user_b_changes["last_seq"], auth=user_b_session, feed="normal")

        if len(user_b_changes_after_grant["results"]) > 0:
            # Found changes, break out and validate changes!
            break

        time.sleep(1)
        count += 1

    # User B should have received 51 changes (a_docs + 1 '_user/USER_B' doc) if a REST grant,
    # or 50 changes if the grant is via the sync function
    changes_results = user_b_changes_after_grant["results"]
    assert 50 <= len(changes_results) <= 51

    # Create a dictionary of id / rev pairs for all the docs from changes that are not "_user/" docs
    ids_and_revs_from_user_changes = {
        change["id"]: change["changes"][0]["rev"]
        for change in changes_results if not change["id"].startswith("_user/")
    }
    assert len(ids_and_revs_from_user_changes) == 50

    # Create a dictionary of id / rev pairs for all of the channel A docs
    ids_and_revs_from_a_docs = {doc["id"]: doc["rev"] for doc in a_docs}
    assert len(ids_and_revs_from_a_docs) == 50

    # Check that the changes and the a_docs are identical in id and rev
    assert ids_and_revs_from_user_changes == ids_and_revs_from_a_docs

    # Get changes from the last_seq of the post-grant changes request. There should be no new changes.
    user_b_changes = client.get_changes(
        url=sg_url, db=sg_db, since=user_b_changes_after_grant["last_seq"], auth=user_b_session, feed="normal")
    assert len(user_b_changes["results"]) == 0
def test_listener_two_sync_gateways(setup_client_syncgateway_test):
    """
    Port of https://github.com/couchbaselabs/sync-gateway-tests/blob/master/tests/cbl-replication-mismatch-2-gateways.js

    Scenario:
      1. Start 2 sync_gateways
      2. Create sg_db_one db on sync_gateway one
      3. Create sg_db_two db on sync_gateway two
      4. Create ls_db_one and ls_db_two on LiteServ
      5. Setup continuous push / pull replication from ls_db_one <-> sg_db_one
      6. Setup continuous push / pull replication from ls_db_two <-> sg_db_two
      7. Setup continuous push / pull replication from sg_db_one <-> ls_db_two
      8. Setup continuous push / pull replication from sg_db_two <-> ls_db_one
      9. Add num_docs / 2 to each LiteServ database
      10. Verify each database has num_docs docs
      11. Verify all_docs in all dbs
      12. Verify changes feed for sg_db_one and sg_db_two
      13. Verify changes feed for ls_db_one and ls_db_two
    """

    num_docs = 500

    ls_url = setup_client_syncgateway_test["ls_url"]
    cluster_config = setup_client_syncgateway_test["cluster_config"]
    sg_mode = setup_client_syncgateway_test["sg_mode"]

    cluster_util = ClusterKeywords()
    topology = cluster_util.get_cluster_topology(cluster_config)

    sg_one_admin_url = topology["sync_gateways"][0]["admin"]
    sg_two_admin_url = topology["sync_gateways"][1]["admin"]
    cb_server_url = topology["couchbase_servers"][0]

    log_info("Sync Gateway 1 admin url: {}".format(sg_one_admin_url))
    log_info("Sync Gateway 2 admin url: {}".format(sg_two_admin_url))
    log_info("Couchbase Server url: {}".format(cb_server_url))

    c = cluster.Cluster(cluster_config)
    sg_config_path = sync_gateway_config_path_for_mode("listener_tests/multiple_sync_gateways", sg_mode)
    c.reset(sg_config_path=sg_config_path)

    ls_db_one = "ls_db1"
    ls_db_two = "ls_db2"
    sg_db_one = "sg_db1"
    sg_db_two = "sg_db2"

    log_info("ls_url: {}".format(ls_url))
    log_info("sg_one_admin_url: {}".format(sg_one_admin_url))
    log_info("sg_two_admin_url: {}".format(sg_two_admin_url))
    log_info("num_docs: {}".format(num_docs))
    log_info("Running 'test_listener_two_sync_gateways' ...")

    client = MobileRestClient()

    # Delete sg_db2 on sync_gateway 1
    client.delete_database(url=sg_one_admin_url, name=sg_db_two)

    # Delete sg_db1 on sync_gateway 2
    client.delete_database(url=sg_two_admin_url, name=sg_db_one)

    # Create dbs on LiteServ
    client.create_database(ls_url, ls_db_one)
    client.create_database(ls_url, ls_db_two)

    # Start continuous push / pull replication ls_db_one <-> sg_db_one
    client.start_replication(url=ls_url, continuous=True, from_db=ls_db_one, to_url=sg_one_admin_url, to_db=sg_db_one)
    client.start_replication(url=ls_url, continuous=True, from_url=sg_one_admin_url, from_db=sg_db_one, to_db=ls_db_one)

    # Start continuous push / pull replication ls_db_two <-> sg_db_two
    client.start_replication(url=ls_url, continuous=True, from_db=ls_db_two, to_url=sg_two_admin_url, to_db=sg_db_two)
    client.start_replication(url=ls_url, continuous=True, from_url=sg_two_admin_url, from_db=sg_db_two, to_db=ls_db_two)

    # Start continuous push / pull replication sg_db_one <-> ls_db_two
    client.start_replication(url=ls_url, continuous=True, from_url=sg_one_admin_url, from_db=sg_db_one, to_db=ls_db_two)
    client.start_replication(url=ls_url, continuous=True, from_db=ls_db_two, to_url=sg_one_admin_url, to_db=sg_db_one)

    # Start continuous push / pull replication sg_db_two <-> ls_db_one
    client.start_replication(url=ls_url, continuous=True, from_url=sg_two_admin_url, from_db=sg_db_two, to_db=ls_db_one)
    client.start_replication(url=ls_url, continuous=True, from_db=ls_db_one, to_url=sg_two_admin_url, to_db=sg_db_two)

    ls_db_one_docs = client.add_docs(url=ls_url, db=ls_db_one, number=num_docs // 2, id_prefix="ls_db_one_doc")
    assert len(ls_db_one_docs) == num_docs // 2

    ls_db_two_docs = client.add_docs(url=ls_url, db=ls_db_two, number=num_docs // 2, id_prefix="ls_db_two_doc")
    assert len(ls_db_two_docs) == num_docs // 2

    all_docs = client.merge(ls_db_one_docs, ls_db_two_docs)
    assert len(all_docs) == num_docs

    # Verify docs replicate to each db
    client.verify_docs_present(url=ls_url, db=ls_db_one, expected_docs=all_docs)
    client.verify_docs_present(url=ls_url, db=ls_db_two, expected_docs=all_docs)
    client.verify_docs_present(url=sg_one_admin_url, db=sg_db_one, expected_docs=all_docs)
    client.verify_docs_present(url=sg_two_admin_url, db=sg_db_two, expected_docs=all_docs)

    # Verify changes feeds for each db
    client.verify_docs_in_changes(url=ls_url, db=ls_db_one, expected_docs=all_docs)
    client.verify_docs_in_changes(url=ls_url, db=ls_db_two, expected_docs=all_docs)
    client.verify_docs_in_changes(url=sg_one_admin_url, db=sg_db_one, expected_docs=all_docs)
    client.verify_docs_in_changes(url=sg_two_admin_url, db=sg_db_two, expected_docs=all_docs)
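# The eight start_replication calls above form a full mesh between the two
# Listener dbs and the two Sync Gateway dbs. Below is a data-driven sketch of
# the same wiring; it is a pure restructuring of the calls already shown, and
# 'wire_mesh_sketch' is a hypothetical helper name, not part of the testkit.
def wire_mesh_sketch(client, ls_url, pairs):
    """pairs: iterable of (local_db, remote_url, remote_db) tuples."""
    for local_db, remote_url, remote_db in pairs:
        # push: local -> remote
        client.start_replication(url=ls_url, continuous=True,
                                 from_db=local_db, to_url=remote_url, to_db=remote_db)
        # pull: remote -> local
        client.start_replication(url=ls_url, continuous=True,
                                 from_url=remote_url, from_db=remote_db, to_db=local_db)


# Usage with the names from the test above:
# wire_mesh_sketch(client, ls_url, [
#     ("ls_db1", sg_one_admin_url, "sg_db1"),
#     ("ls_db2", sg_two_admin_url, "sg_db2"),
#     ("ls_db2", sg_one_admin_url, "sg_db1"),
#     ("ls_db1", sg_two_admin_url, "sg_db2"),
# ])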
def test_listener_two_sync_gateways(setup_client_2sgs_test):
    """
    Port of https://github.com/couchbaselabs/sync-gateway-tests/blob/master/tests/cbl-replication-mismatch-2-gateways.js

    Scenario:
      1. Start 2 sync_gateways
      2. Create sg_db_one db on sync_gateway one
      3. Create sg_db_two db on sync_gateway two
      4. Create ls_db_one and ls_db_two on LiteServ
      5. Setup continuous push / pull replication from ls_db_one <-> sg_db_one
      6. Setup continuous push / pull replication from ls_db_two <-> sg_db_two
      7. Setup continuous push / pull replication from sg_db_one <-> ls_db_two
      8. Setup continuous push / pull replication from sg_db_two <-> ls_db_one
      9. Add num_docs / 2 to each LiteServ database
      10. Verify each database has num_docs docs
      11. Verify all_docs in all dbs
      12. Verify changes feed for sg_db_one and sg_db_two
      13. Verify changes feed for ls_db_one and ls_db_two
    """

    num_docs = 500

    ls_url = setup_client_2sgs_test["ls_url"]
    cluster_config = setup_client_2sgs_test["cluster_config"]
    sg_one_admin_url = setup_client_2sgs_test["sg_one_admin_url"]
    sg_two_admin_url = setup_client_2sgs_test["sg_two_admin_url"]

    sg_util = SyncGateway()
    sg_util.start_sync_gateway(cluster_config=cluster_config, url=sg_one_admin_url, config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS))
    sg_util.start_sync_gateway(cluster_config=cluster_config, url=sg_two_admin_url, config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS))

    ls_db_one = "ls_db1"
    ls_db_two = "ls_db2"
    sg_db_one = "sg_db1"
    sg_db_two = "sg_db2"

    log_info("ls_url: {}".format(ls_url))
    log_info("sg_one_admin_url: {}".format(sg_one_admin_url))
    log_info("sg_two_admin_url: {}".format(sg_two_admin_url))
    log_info("num_docs: {}".format(num_docs))
    log_info("Running 'test_listener_two_sync_gateways' ...")

    client = MobileRestClient()

    # Create dbs on sync_gateway
    client.create_database(sg_one_admin_url, sg_db_one, "walrus:")
    client.create_database(sg_two_admin_url, sg_db_two, "walrus:")

    # Create dbs on LiteServ
    client.create_database(ls_url, ls_db_one)
    client.create_database(ls_url, ls_db_two)

    # Start continuous push / pull replication ls_db_one <-> sg_db_one
    client.start_replication(
        url=ls_url, continuous=True,
        from_db=ls_db_one,
        to_url=sg_one_admin_url, to_db=sg_db_one
    )
    client.start_replication(
        url=ls_url, continuous=True,
        from_url=sg_one_admin_url, from_db=sg_db_one,
        to_db=ls_db_one
    )

    # Start continuous push / pull replication ls_db_two <-> sg_db_two
    client.start_replication(
        url=ls_url, continuous=True,
        from_db=ls_db_two,
        to_url=sg_two_admin_url, to_db=sg_db_two
    )
    client.start_replication(
        url=ls_url, continuous=True,
        from_url=sg_two_admin_url, from_db=sg_db_two,
        to_db=ls_db_two
    )

    # Start continuous push / pull replication sg_db_one <-> ls_db_two
    client.start_replication(
        url=ls_url, continuous=True,
        from_url=sg_one_admin_url, from_db=sg_db_one,
        to_db=ls_db_two
    )
    client.start_replication(
        url=ls_url, continuous=True,
        from_db=ls_db_two,
        to_url=sg_one_admin_url, to_db=sg_db_one
    )

    # Start continuous push / pull replication sg_db_two <-> ls_db_one
    client.start_replication(
        url=ls_url, continuous=True,
        from_url=sg_two_admin_url, from_db=sg_db_two,
        to_db=ls_db_one
    )
    client.start_replication(
        url=ls_url, continuous=True,
        from_db=ls_db_one,
        to_url=sg_two_admin_url, to_db=sg_db_two
    )

    ls_db_one_docs = client.add_docs(url=ls_url, db=ls_db_one, number=num_docs // 2, id_prefix="ls_db_one_doc")
    assert len(ls_db_one_docs) == num_docs // 2

    ls_db_two_docs = client.add_docs(url=ls_url, db=ls_db_two, number=num_docs // 2, id_prefix="ls_db_two_doc")
    assert len(ls_db_two_docs) == num_docs // 2

    all_docs = client.merge(ls_db_one_docs, ls_db_two_docs)
    assert len(all_docs) == num_docs

    # Verify docs replicate to each db
    client.verify_docs_present(url=ls_url, db=ls_db_one, expected_docs=all_docs)
    client.verify_docs_present(url=ls_url, db=ls_db_two, expected_docs=all_docs)
    client.verify_docs_present(url=sg_one_admin_url, db=sg_db_one, expected_docs=all_docs)
    client.verify_docs_present(url=sg_two_admin_url, db=sg_db_two, expected_docs=all_docs)

    # Verify changes feeds for each db
    client.verify_docs_in_changes(url=ls_url, db=ls_db_one, expected_docs=all_docs)
    client.verify_docs_in_changes(url=ls_url, db=ls_db_two, expected_docs=all_docs)
    client.verify_docs_in_changes(url=sg_one_admin_url, db=sg_db_one, expected_docs=all_docs)
    client.verify_docs_in_changes(url=sg_two_admin_url, db=sg_db_two, expected_docs=all_docs)
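# client.create_database(sg_admin_url, db, "walrus:") above provisions an
# in-memory walrus-backed database through the Admin REST API. Below is a
# minimal sketch of the underlying call; the config body shape is an
# assumption based on Sync Gateway 1.x database config conventions.
import requests


def create_walrus_db_sketch(sg_admin_url, db_name):
    """Create an in-memory (walrus) database via the Admin REST API."""
    resp = requests.put(
        "{}/{}/".format(sg_admin_url, db_name),
        json={"server": "walrus:"})
    resp.raise_for_status()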