def test_auto_prune_listener_sanity(setup_client_syncgateway_test): """Sanity test for the autoprune feature 1. Create a db and put a doc 2. Update the docs past the default revs_limit (20) 3. Assert that the docs only retain 20 revs """ ls_url = setup_client_syncgateway_test["ls_url"] client = MobileRestClient() log_info("Running 'test_auto_prune_listener_sanity' ...") log_info("ls_url: {}".format(ls_url)) num_docs = 1 num_revs = 100 ls_db = client.create_database(url=ls_url, name="ls_db") docs = client.add_docs(url=ls_url, db=ls_db, number=num_docs, id_prefix="ls_db_doc") assert len(docs) == num_docs client.update_docs(url=ls_url, db=ls_db, docs=docs, number_updates=num_revs) client.verify_max_revs_num_for_docs(url=ls_url, db=ls_db, docs=docs, expected_max_number_revs_per_doc=20)
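# A hedged sketch of the check verify_max_revs_num_for_docs presumably performs:
# fetch each doc with ?revs=true and count the revision ids the database still
# retains. The helper below is hypothetical (not part of MobileRestClient); the
# endpoint shape follows the CouchDB-style REST API the Listener exposes.
import requests

def count_retained_revs(ls_url, db, doc_id):
    resp = requests.get("{}/{}/{}".format(ls_url, db, doc_id), params={"revs": "true"})
    resp.raise_for_status()
    # _revisions.ids lists the revision hashes still stored for this doc
    return len(resp.json()["_revisions"]["ids"])

# After 100 updates with the default revs_limit of 20, count_retained_revs
# should report 20 for "ls_db_doc_0".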
def test_peer_2_peer_sanity_push_one_shot_continuous(setup_p2p_test): """ 1. Create ls_db1 database on LiteServ One 2. Create ls_db2 database on LiteServ Two 3. Add 10000 docs to LiteServ 1 ls_db1 4. Create one shot push replication LiteServ 1 ls_db1 -> LiteServ 2 ls_db2 5. Verify the docs from step 3 replicate to LiteServ 2 ls_db2 and show up in its changes feed 6. Create continuous push replication LiteServ 1 ls_db1 -> LiteServ 2 ls_db2 7. Add 10000 docs to LiteServ 1 ls_db1 8. Verify all docs replicate to LiteServ 2 ls_db2 9. Verify all docs show up in changes for LiteServ 2 ls_db2 """ ls_url_one = setup_p2p_test["ls_url_one"] ls_url_two = setup_p2p_test["ls_url_two"] num_docs_per_db = 10000 log_info("ls_url_one: {}".format(ls_url_one)) log_info("ls_url_two: {}".format(ls_url_two)) client = MobileRestClient() log_info("Creating databases") ls_db1 = client.create_database(url=ls_url_one, name="ls_db1") ls_db2 = client.create_database(url=ls_url_two, name="ls_db2") bulk_docs = create_docs("test_ls_db1_oneshot", num_docs_per_db) ls_db1_docs_oneshot = client.add_bulk_docs(ls_url_one, ls_db1, bulk_docs) assert len(ls_db1_docs_oneshot) == num_docs_per_db # Setup one shot push replication from LiteServ 1 ls_db1 to LiteServ 2 ls_db2 log_info("Setting up a one-shot push replication from ls_db1 to ls_db2") push_repl = client.start_replication( url=ls_url_one, continuous=False, from_db=ls_db1, to_url=ls_url_two, to_db=ls_db2, ) log_info("Replication ID: {}".format(push_repl)) client.verify_docs_present(url=ls_url_two, db=ls_db2, expected_docs=ls_db1_docs_oneshot) client.verify_docs_in_changes(url=ls_url_two, db=ls_db2, expected_docs=ls_db1_docs_oneshot) # Setup continuous push replication from LiteServ 1 ls_db1 to LiteServ 2 ls_db2 log_info("Setting up a continuous push replication from ls_db1 to ls_db2") push_repl = client.start_replication( url=ls_url_one, continuous=True, from_db=ls_db1, to_url=ls_url_two, to_db=ls_db2, ) log_info("Replication ID: {}".format(push_repl)) ls_db1_docs = client.add_docs(url=ls_url_one, db=ls_db1, number=num_docs_per_db, id_prefix="test_ls_db1") assert len(ls_db1_docs) == num_docs_per_db client.verify_docs_present(url=ls_url_two, db=ls_db2, expected_docs=ls_db1_docs_oneshot + ls_db1_docs) client.verify_docs_in_changes(url=ls_url_two, db=ls_db2, expected_docs=ls_db1_docs_oneshot + ls_db1_docs)
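# start_replication is presumably a thin wrapper over the Listener's
# CouchDB-style POST /_replicate endpoint. A hedged sketch of the request the
# one-shot push above would reduce to; the exact field and response shapes are
# assumptions, not confirmed API:
import requests

def start_push_replication(source_ls_url, target_ls_url, from_db, to_db, continuous=False):
    body = {
        "source": from_db,                               # local db on the replicating LiteServ
        "target": "{}/{}".format(target_ls_url, to_db),  # remote db URL on the peer
        "continuous": continuous,
    }
    resp = requests.post("{}/_replicate".format(source_ls_url), json=body)
    resp.raise_for_status()
    return resp.json().get("session_id")  # replication id, when the platform returns one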
def test_writing_attachment_to_couchbase_server(params_from_base_test_setup, sg_conf_name): """ 1. Start sync_gateway and create a user / session 2. Add docs, including one doc with an attachment 3. Use the CBS sdk to look for the attachment doc. Doc ID will look like _sync:att:sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0= (where the suffix is the digest) 4. Assert that the attachment doc was written to Couchbase Server """ cluster_config = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode) cluster_helper = ClusterKeywords() cluster_helper.reset_cluster(cluster_config, sg_conf) topology = cluster_helper.get_cluster_topology(cluster_config) cbs_url = topology["couchbase_servers"][0] sg_url = topology["sync_gateways"][0]["public"] sg_url_admin = topology["sync_gateways"][0]["admin"] sg_db = "db" bucket = "data-bucket" log_info("Running 'test_writing_attachment_to_couchbase_server'") log_info("Using cbs_url: {}".format(cbs_url)) log_info("Using sg_url: {}".format(sg_url)) log_info("Using sg_url_admin: {}".format(sg_url_admin)) log_info("Using sg_db: {}".format(sg_db)) log_info("Using bucket: {}".format(bucket)) sg_user_name = "sg_user" sg_user_password = "******" sg_user_channels = ["NBC"] client = MobileRestClient() client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels) sg_user_session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name) docs = client.add_docs(url=sg_url, db=sg_db, number=100, id_prefix=sg_db, channels=sg_user_channels, auth=sg_user_session) assert len(docs) == 100 # Create doc with attachment and push to sync_gateway doc_with_att = document.create_doc(doc_id="att_doc", content={"sample_key": "sample_val"}, attachment_name="sample_text.txt", channels=sg_user_channels) client.add_doc(url=sg_url, db=sg_db, doc=doc_with_att, auth=sg_user_session) server = CouchbaseServer(cbs_url) # Assert that the attachment doc gets written to couchbase server server_att_docs = server.get_server_docs_with_prefix(bucket=bucket, prefix="_sync:att:") num_att_docs = len(server_att_docs) assert num_att_docs == 1
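# Sync Gateway stores attachment bodies under keys of the form
# '_sync:att:sha1-<base64(sha1(bytes))>' -- the docstring's example key shows
# the base64 digest suffix. A sketch of deriving that key for a known
# attachment body, assuming this digest scheme:
import base64
import hashlib

def sync_att_key(attachment_bytes):
    digest = base64.b64encode(hashlib.sha1(attachment_bytes).digest()).decode("ascii")
    return "_sync:att:sha1-{}".format(digest)

# With the key in hand, the CBS SDK could fetch the attachment doc directly
# instead of scanning the bucket by the '_sync:att:' prefix as above.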
def test_peer_2_peer_sanity_pull(setup_p2p_test, num_docs_per_db, seeded_db, attachments_generator): """ 1. Create ls_db1 database on LiteServ One 2. Create ls_db2 database on LiteServ Two, optionally seeding it with num_docs_per_db docs 3. Create continuous pull replication LiteServ 1 ls_db1 <- LiteServ 2 ls_db2 4. Add num_docs_per_db docs to LiteServ 2 ls_db2, optionally with attachments 5. Verify all docs replicate to LiteServ 1 ls_db1 6. Verify all docs show up in changes for LiteServ 1 ls_db1 """ ls_url_one = setup_p2p_test["ls_url_one"] ls_url_two = setup_p2p_test["ls_url_two"] log_info("ls_url_one: {}".format(ls_url_one)) log_info("ls_url_two: {}".format(ls_url_two)) attachments = False if attachments_generator: log_info("Running test_peer_2_peer_sanity_pull with attachment {}".format(attachments_generator)) attachments = True client = MobileRestClient() log_info("Creating databases") ls_db1 = client.create_database(url=ls_url_one, name="ls_db1") ls_db2 = client.create_database(url=ls_url_two, name="ls_db2") if seeded_db: bulk_docs = create_docs("test_ls_db2_seed", num_docs_per_db) ls_db2_docs_seed = client.add_bulk_docs(url=ls_url_two, db=ls_db2, docs=bulk_docs) assert len(ls_db2_docs_seed) == num_docs_per_db # Setup continuous pull replication from LiteServ 2 ls_db2 to LiteServ 1 ls_db1 pull_repl = client.start_replication( url=ls_url_one, continuous=True, from_url=ls_url_two, from_db=ls_db2, to_db=ls_db1 ) client.wait_for_replication_status_idle(url=ls_url_one, replication_id=pull_repl) ls_db2_docs = client.add_docs(url=ls_url_two, db=ls_db2, number=num_docs_per_db, id_prefix="test_ls_db2", attachments_generator=attachments_generator) assert len(ls_db2_docs) == num_docs_per_db client.verify_docs_present(url=ls_url_one, db=ls_db1, expected_docs=ls_db2_docs, attachments=attachments) client.verify_docs_in_changes(url=ls_url_one, db=ls_db1, expected_docs=ls_db2_docs) total_ls_db2_docs = ls_db2_docs if seeded_db: total_ls_db2_docs += ls_db2_docs_seed client.verify_docs_present(url=ls_url_one, db=ls_db1, expected_docs=total_ls_db2_docs, attachments=attachments) client.verify_docs_in_changes(url=ls_url_one, db=ls_db1, expected_docs=total_ls_db2_docs)
def test_load_balance_sanity(params_from_base_test_setup): cluster_config = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] sg_conf_name = "sync_gateway_default_functional_tests" sg_conf_path = sync_gateway_config_path_for_mode(sg_conf_name, mode) cluster_util = ClusterKeywords() cluster_util.reset_cluster( cluster_config=cluster_config, sync_gateway_config=sg_conf_path ) topology = cluster_util.get_cluster_topology(cluster_config) admin_sg_one = topology["sync_gateways"][0]["admin"] lb_url = "{}:4984".format(topology["load_balancers"][0]) sg_db = "db" num_docs = 1000 sg_user_name = "seth" sg_user_password = "******" channels = ["ABC", "CBS"] client = MobileRestClient() user = client.create_user(admin_sg_one, sg_db, sg_user_name, sg_user_password, channels=channels) session = client.create_session(admin_sg_one, sg_db, sg_user_name) log_info(user) log_info(session) log_info("Adding docs to the load balancer ...") ct = ChangesTracker(url=lb_url, db=sg_db, auth=session) with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor: log_info("Starting ...") ct_task = executor.submit(ct.start) log_info("Adding docs ...") docs = client.add_docs(lb_url, sg_db, num_docs, "test_doc", channels=channels, auth=session) assert len(docs) == num_docs log_info("Adding docs done") wait_for_changes = executor.submit(ct.wait_until, docs) if wait_for_changes.result(): log_info("Stopping ...") log_info("Found all docs ...") executor.submit(ct.stop) ct_task.result() else: executor.submit(ct.stop) ct_task.result() raise Exception("Could not find all changes in feed before timeout!!")
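# ChangesTracker's implementation is not shown here; a minimal longpoll-style
# stand-in sketch, assuming the standard /{db}/_changes API. It resumes from
# last_seq after every response and stops once all expected doc ids are seen.
# The helper and its 'session' cookie handling are illustrative only.
import time
import requests

def wait_until_docs_in_changes(base_url, db, expected_docs, session=None, deadline=60):
    remaining = {doc["id"] for doc in expected_docs}
    since = 0
    start = time.time()
    while remaining:
        if time.time() - start > deadline:
            raise Exception("Timed out with {} docs still missing".format(len(remaining)))
        resp = requests.get(
            "{}/{}/_changes".format(base_url, db),
            params={"feed": "longpoll", "since": since},
            cookies=dict([session]) if session else None,
        )
        resp.raise_for_status()
        body = resp.json()
        remaining -= {change["id"] for change in body["results"]}
        since = body["last_seq"]  # resume from the last sequence seen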
def test_auto_prune_with_pull(setup_client_syncgateway_test): """Sanity test for autopruning with replication 1. Create a database on LiteServ (ls_db) 2. Add doc to sync gateway 3. Update doc 50 times on sync_gateway 4. Set up pull replication from sync_gateway db to LiteServ db 5. Verify number of revisions on client is default (20) """ cluster_config = setup_client_syncgateway_test["cluster_config"] ls_url = setup_client_syncgateway_test["ls_url"] sg_url = setup_client_syncgateway_test["sg_url"] sg_admin_url = setup_client_syncgateway_test["sg_admin_url"] client = MobileRestClient() sg_helper = SyncGateway() sg_helper.start_sync_gateway( cluster_config=cluster_config, url=sg_url, config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS) ) log_info("Running 'test_auto_prune_with_pull' ...") log_info("ls_url: {}".format(ls_url)) log_info("sg_url: {}".format(sg_url)) log_info("sg_admin_url: {}".format(sg_admin_url)) num_docs = 1 num_revs = 50 sg_user_channels = ["NBC"] sg_db = "db" sg_user_name = "sg_user" client.create_user(url=sg_admin_url, db=sg_db, name=sg_user_name, password="******", channels=sg_user_channels) sg_session = client.create_session(url=sg_admin_url, db=sg_db, name=sg_user_name) ls_db = client.create_database(url=ls_url, name="ls_db") sg_db_docs = client.add_docs( url=sg_url, db=sg_db, number=num_docs, id_prefix=sg_db, channels=sg_user_channels, auth=sg_session ) assert len(sg_db_docs) == num_docs sg_docs_update = client.update_docs(url=sg_url, db=sg_db, docs=sg_db_docs, number_updates=num_revs, auth=sg_session) # Start continuous replication ls_db <- sg_db repl_one = client.start_replication(url=ls_url, continuous=True, from_url=sg_admin_url, from_db=sg_db, to_db=ls_db) client.wait_for_replication_status_idle(url=ls_url, replication_id=repl_one) client.verify_docs_present(url=ls_url, db=ls_db, expected_docs=sg_docs_update) client.verify_revs_num_for_docs(url=ls_url, db=ls_db, docs=sg_docs_update, expected_revs_per_doc=20)
def test_ios_full_life_cycle(request, liteserv_with_storage_engine_from_fixture): liteserv = liteserv_with_storage_engine_from_fixture test_name = request.node.name logfile = "{}/logs/{}-{}-{}.txt".format(RESULTS_DIR, type(liteserv).__name__, test_name, datetime.datetime.now()) ls_url = liteserv.start(logfile) client = MobileRestClient() client.create_database(ls_url, "ls_db") docs = client.add_docs(ls_url, db="ls_db", number=10, id_prefix="test_doc") assert len(docs) == 10 client.delete_databases(ls_url) liteserv.stop()
def test_net_msft_full_life_cycle(request, liteserv_with_storage_engine_from_fixture): liteserv = liteserv_with_storage_engine_from_fixture test_name = request.node.name logfile = "{}/logs/{}-{}-{}.txt".format(RESULTS_DIR, type(liteserv).__name__, test_name, datetime.datetime.now()) ls_url = liteserv.start(logfile) client = MobileRestClient() client.create_database(ls_url, "ls_db") docs = client.add_docs(ls_url, db="ls_db", number=10, id_prefix="test_doc") assert len(docs) == 10 client.delete_databases(ls_url) liteserv.stop()
def test_peer_2_peer_sanity_pull(setup_p2p_test): """ 1. Create ls_db1 database on LiteServ One 2. Create ls_db2 database on LiteServ Two 3. Create continuous pull replication LiteServ 1 ls_db1 <- LiteServ 2 ls_db2 4. Add 5000 docs to LiteServ 2 ls_db2 5. Verify all docs replicate to LiteServ 1 ls_db1 6. Verify all docs show up in changes for LiteServ 1 ls_db1 """ ls_url_one = setup_p2p_test["ls_url_one"] ls_url_two = setup_p2p_test["ls_url_two"] num_docs_per_db = 5000 log_info("ls_url_one: {}".format(ls_url_one)) log_info("ls_url_two: {}".format(ls_url_two)) client = MobileRestClient() log_info("Creating databases") ls_db1 = client.create_database(url=ls_url_one, name="ls_db1") ls_db2 = client.create_database(url=ls_url_two, name="ls_db2") # Setup continuous pull replication from LiteServ 2 ls_db2 to LiteServ 1 ls_db1 pull_repl = client.start_replication( url=ls_url_one, continuous=True, from_url=ls_url_two, from_db=ls_db2, to_db=ls_db1 ) client.wait_for_replication_status_idle(url=ls_url_one, replication_id=pull_repl) ls_db2_docs = client.add_docs(url=ls_url_two, db=ls_db2, number=num_docs_per_db, id_prefix="test_ls_db2") assert len(ls_db2_docs) == num_docs_per_db client.verify_docs_present(url=ls_url_one, db=ls_db1, expected_docs=ls_db2_docs) client.verify_docs_in_changes(url=ls_url_one, db=ls_db1, expected_docs=ls_db2_docs)
def test_rebalance_sanity(params_from_base_test_setup): cluster_config = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] sg_version = get_sg_version(cluster_config) if compare_versions(sg_version, '1.5') < 0: pytest.skip("This test needs multiple URLs in the SG config, not supported by SG < 1.5") cluster_helper = ClusterKeywords() sg_conf_name = "sync_gateway_default_functional_tests" sg_conf_path = sync_gateway_config_path_for_mode(sg_conf_name, mode) cluster_helper.reset_cluster(cluster_config=cluster_config, sync_gateway_config=sg_conf_path) topology = cluster_helper.get_cluster_topology(cluster_config) admin_sg_one = topology["sync_gateways"][0]["admin"] sg_one_url = topology["sync_gateways"][0]["public"] cluster_servers = topology["couchbase_servers"] cbs_one_url = cluster_servers[0] cbs_two_url = cluster_servers[1] log_info("Running: 'test_rebalance_sanity'") log_info("cluster_config: {}".format(cluster_config)) log_info("admin_sg: {}".format(admin_sg_one)) log_info("sg_url: {}".format(sg_one_url)) log_info("cbs_one_url: {}".format(cbs_one_url)) log_info("cbs_two_url: {}".format(cbs_two_url)) sg_db = "db" num_docs = 100 num_updates = 100 sg_user_name = "seth" sg_user_password = "******" channels = ["ABC", "CBS"] client = MobileRestClient() cb_server = couchbaseserver.CouchbaseServer(cbs_one_url) server_to_remove = couchbaseserver.CouchbaseServer(cbs_two_url) client.create_user(admin_sg_one, sg_db, sg_user_name, sg_user_password, channels=channels) session = client.create_session(admin_sg_one, sg_db, sg_user_name) with concurrent.futures.ThreadPoolExecutor(5) as executor: # Add docs to sg log_info("Adding docs to sync_gateway") docs = client.add_docs(sg_one_url, sg_db, num_docs, "test_doc", channels=channels, auth=session) assert len(docs) == num_docs # Start updating docs and rebalance out one CBS node log_info("Updating docs on sync_gateway") update_docs_task = executor.submit(client.update_docs, sg_one_url, sg_db, docs, num_updates, auth=session) # Run rebalance in background cb_server.rebalance_out(cluster_servers, server_to_remove) updated_docs = update_docs_task.result() log_info(updated_docs) # Verify docs / revisions present client.verify_docs_present(sg_one_url, sg_db, updated_docs, auth=session) # Verify docs revisions in changes feed client.verify_docs_in_changes(sg_one_url, sg_db, updated_docs, auth=session) # Rebalance Server back in to the pool cb_server.add_node(server_to_remove) cb_server.rebalance_in(cluster_servers, server_to_remove)
def test_replication_with_multiple_client_dbs_and_single_sync_gateway_db(setup_client_syncgateway_test): """Test replication from multiple client dbs to one sync_gateway db""" cluster_config = setup_client_syncgateway_test["cluster_config"] ls_url = setup_client_syncgateway_test["ls_url"] sg_url = setup_client_syncgateway_test["sg_url"] sg_admin_url = setup_client_syncgateway_test["sg_admin_url"] num_docs = 1000 sg_helper = SyncGateway() sg_helper.start_sync_gateway( cluster_config=cluster_config, url=sg_url, config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS) ) log_info("Running 'test_replication_with_multiple_client_dbs_and_single_sync_gateway_db'") log_info("ls_url: {}".format(ls_url)) log_info("sg_admin_url: {}".format(sg_admin_url)) log_info("sg_url: {}".format(sg_url)) client = MobileRestClient() ls_db1 = client.create_database(url=ls_url, name="ls_db1") ls_db2 = client.create_database(url=ls_url, name="ls_db2") sg_db = client.create_database(url=sg_admin_url, name="sg_db", server="walrus:") # Setup continuous push / pull replication from ls_db1 to sg_db client.start_replication( url=ls_url, continuous=True, from_db=ls_db1, to_url=sg_admin_url, to_db=sg_db ) client.start_replication( url=ls_url, continuous=True, from_url=sg_admin_url, from_db=sg_db, to_db=ls_db1 ) # Setup continuous push / pull replication from ls_db2 to sg_db client.start_replication( url=ls_url, continuous=True, from_db=ls_db2, to_url=sg_admin_url, to_db=sg_db ) client.start_replication( url=ls_url, continuous=True, from_url=sg_admin_url, from_db=sg_db, to_db=ls_db2 ) ls_db_one_docs = client.add_docs(url=ls_url, db=ls_db1, number=num_docs, id_prefix=ls_db1) assert len(ls_db_one_docs) == 1000 ls_db_two_docs = client.add_docs(url=ls_url, db=ls_db2, number=num_docs, id_prefix=ls_db2) assert len(ls_db_two_docs) == 1000 ls_db1_db2_docs = ls_db_one_docs + ls_db_two_docs client.verify_docs_present(url=ls_url, db=ls_db1, expected_docs=ls_db1_db2_docs) client.verify_docs_present(url=ls_url, db=ls_db2, expected_docs=ls_db1_db2_docs) client.verify_docs_present(url=sg_admin_url, db=sg_db, expected_docs=ls_db1_db2_docs) client.verify_docs_in_changes(url=sg_admin_url, db=sg_db, expected_docs=ls_db1_db2_docs) client.verify_docs_in_changes(url=ls_url, db=ls_db1, expected_docs=ls_db1_db2_docs) client.verify_docs_in_changes(url=ls_url, db=ls_db2, expected_docs=ls_db1_db2_docs)
def test_take_down_bring_up_sg_accel_validate_cbgt(params_from_base_test_setup, sg_conf): """ Scenario 1 Start with 3 sg_accels Take down 2 sg_accels (block until down -- poll port if needed) Doc adds with uuids (~30 sec for cbgt to reshard) polling loop: wait for all docs to come back over changes feed Call validate pindex with correct number of accels Scenario 2 (Continuation) When bringing up, you'd have to poll the cbgt_cfg until you get expected number of nodes, then you could validate the pindex with 2 accels """ cluster_conf = params_from_base_test_setup["cluster_config"] log_info("Running 'test_take_down_bring_up_sg_accel_validate_cbgt'") log_info("cluster_conf: {}".format(cluster_conf)) log_info("sg_conf: {}".format(sg_conf)) cluster = Cluster(config=cluster_conf) cluster.reset(sg_config_path=sg_conf) cluster_util = ClusterKeywords() topology = cluster_util.get_cluster_topology(cluster_conf) sg_url = topology["sync_gateways"][0]["public"] sg_admin_url = topology["sync_gateways"][0]["admin"] sg_db = "db" client = MobileRestClient() doc_pusher_user_info = userinfo.UserInfo("doc_pusher", "pass", channels=["A"], roles=[]) doc_pusher_auth = client.create_user( url=sg_admin_url, db=sg_db, name=doc_pusher_user_info.name, password=doc_pusher_user_info.password, channels=doc_pusher_user_info.channels) log_info("Shutting down sg_accels: [{}, {}]".format( cluster.sg_accels[1], cluster.sg_accels[2])) # Shutdown two accel nodes in parallel with concurrent.futures.ThreadPoolExecutor(max_workers=3) as ex: sg_accel_down_task_1 = ex.submit(cluster.sg_accels[1].stop) sg_accel_down_task_2 = ex.submit(cluster.sg_accels[2].stop) assert sg_accel_down_task_1.result() == 0 assert sg_accel_down_task_2.result() == 0 log_info("Finished taking nodes down!") # It should take some time (~30 sec) for cbgt to pick up the failing nodes and reshard the pindexes. During # this, add 1000 docs and start a longpoll changes loop to see if those docs make it to the changes feed. # If the reshard is successful they will show up at some point after. If not, the docs will fail to show up. doc_pusher_docs = client.add_docs(url=sg_url, db=sg_db, number=1000, id_prefix=None, auth=doc_pusher_auth, channels=doc_pusher_user_info.channels) assert len(doc_pusher_docs) == 1000 client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=doc_pusher_docs, auth=doc_pusher_auth, polling_interval=5) # The pindexes should be resharded at this point since all of the changes have shown up assert cluster.validate_cbgt_pindex_distribution(num_running_sg_accels=1) log_info("Start sg_accels: [{}, {}]".format(cluster.sg_accels[1], cluster.sg_accels[2])) # Start the two accel nodes one at a time, validating the pindex distribution after each comes back status = cluster.sg_accels[1].start(sg_conf) assert status == 0 # Poll on pindex reshard after bringing the second accel node back assert cluster.validate_cbgt_pindex_distribution_retry( num_running_sg_accels=2) status = cluster.sg_accels[2].start(sg_conf) assert status == 0 # Poll on pindex reshard after bringing the third accel node back assert cluster.validate_cbgt_pindex_distribution_retry( num_running_sg_accels=3)
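# validate_cbgt_pindex_distribution_retry is used above but not shown; a
# plausible shape is a bounded poll around the non-retry check, since cbgt
# resharding is eventually consistent after a node comes back. Hypothetical
# sketch:
import time

def validate_pindex_distribution_retry(cluster, num_running_sg_accels, attempts=30, delay=2):
    for _ in range(attempts):
        if cluster.validate_cbgt_pindex_distribution(num_running_sg_accels=num_running_sg_accels):
            return True
        time.sleep(delay)  # give cbgt time to move pindexes between nodes
    return False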
def test_initial_push_replication(setup_client_syncgateway_test, continuous): """ 1. Prepare LiteServ to have 10000 documents. 2. Create a one shot / continuous push replication to push the docs into a sync_gateway database. 3. Verify that all of the docs get pushed. """ sg_db = "db" ls_db = "ls_db" seth_channels = ["ABC", "NBC"] num_docs = 10000 cluster_config = setup_client_syncgateway_test["cluster_config"] ls_url = setup_client_syncgateway_test["ls_url"] sg_one_admin = setup_client_syncgateway_test["sg_admin_url"] sg_one_public = setup_client_syncgateway_test["sg_url"] sg_helper = SyncGateway() sg_helper.start_sync_gateway( cluster_config=cluster_config, url=sg_one_public, config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS) ) log_info("Running 'test_initial_push_replication', continuous: {}".format(continuous)) log_info("ls_url: {}".format(ls_url)) log_info("sg_one_admin: {}".format(sg_one_admin)) log_info("sg_one_public: {}".format(sg_one_public)) client = MobileRestClient() client.create_user(sg_one_admin, sg_db, "seth", password="******", channels=seth_channels) session = client.create_session(sg_one_admin, sg_db, "seth") client.create_database(url=ls_url, name=ls_db) # Create 'num_docs' docs on LiteServ docs = client.add_docs( url=ls_url, db=ls_db, number=num_docs, id_prefix="seeded_doc", generator="four_k", channels=seth_channels ) assert len(docs) == num_docs # Start push replication repl_id = client.start_replication( url=ls_url, continuous=continuous, from_db=ls_db, to_url=sg_one_admin, to_db=sg_db ) if continuous: log_info("Waiting for replication status 'Idle' for: {}".format(repl_id)) client.wait_for_replication_status_idle(ls_url, repl_id) else: log_info("Waiting for no replications: {}".format(repl_id)) client.wait_for_no_replications(ls_url) # Verify docs replicated to sync_gateway client.verify_docs_present(url=sg_one_public, db=sg_db, expected_docs=docs, auth=session) # Verify docs show up in sync_gateway's changes feed client.verify_docs_in_changes(url=sg_one_public, db=sg_db, expected_docs=docs, auth=session) replications = client.get_replications(url=ls_url) if continuous: assert len(replications) == 1, "There should only be one replication running" assert replications[0]["status"] == "Idle", "Replication Status should be 'Idle'" assert replications[0]["continuous"], "Running replication should be continuous" # Only .NET has an 'error' property if "error" in replications[0]: assert len(replications[0]["error"]) == 0 else: assert len(replications) == 0, "No replications should be running"
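# wait_for_replication_status_idle presumably polls the Listener's
# /_active_tasks endpoint until the matching replication reports an Idle
# status. A hedged sketch of that loop; the exact task field names vary by
# platform and are assumptions here:
import time
import requests

def wait_for_idle(ls_url, repl_id, deadline=60):
    start = time.time()
    while time.time() - start < deadline:
        tasks = requests.get("{}/_active_tasks".format(ls_url)).json()
        for task in tasks:
            if task.get("task") == repl_id and task.get("status") == "Idle":
                return
        time.sleep(1)
    raise Exception("Replication {} never reached 'Idle'".format(repl_id))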
def test_replication_with_session_cookie(setup_client_syncgateway_test): """Regression test for https://github.com/couchbase/couchbase-lite-android/issues/817 1. SyncGateway Config with guest disabled = true and One user added (e.g. user1 / 1234) 2. Create a new session on SGW for the user1 by using POST /_session. Capture the SyncGatewaySession cookie from the set-cookie in the response header. 3. Start continuous push and pull replicator on the LiteServ with SyncGatewaySession cookie. Make sure that both replicators start correctly 4. Delete the session from SGW by sending DELETE /_sessions/ to SGW 5. Cancel both push and pull replicator on the LiteServ 6. Repeat step 1 and 2 """ ls_db = "ls_db" sg_db = "db" cluster_config = setup_client_syncgateway_test["cluster_config"] ls_url = setup_client_syncgateway_test["ls_url"] sg_url = setup_client_syncgateway_test["sg_url"] sg_admin_url = setup_client_syncgateway_test["sg_admin_url"] sg_helper = SyncGateway() sg_helper.start_sync_gateway( cluster_config=cluster_config, url=sg_url, config="{}/walrus-user.json".format(SYNC_GATEWAY_CONFIGS) ) log_info("Running 'test_replication_with_session_cookie'") log_info("ls_url: {}".format(ls_url)) log_info("sg_admin_url: {}".format(sg_admin_url)) log_info("sg_url: {}".format(sg_url)) client = MobileRestClient() client.create_database(url=ls_url, name=ls_db) # Get session header for user_1 session_header = client.create_session_header(url=sg_url, db=sg_db, name="user_1", password="******") # Get session id from header session_parts = re.split("=|;", session_header) session_id = session_parts[1] log_info("{}: {}".format(session_parts[0], session_id)) session = (session_parts[0], session_id) # Start authenticated push replication repl_one = client.start_replication( url=ls_url, continuous=True, from_db=ls_db, to_url=sg_url, to_db=sg_db, to_auth=session_header ) # Start authenticated pull replication repl_two = client.start_replication( url=ls_url, continuous=True, from_url=sg_url, from_db=sg_db, from_auth=session_header, to_db=ls_db, ) # Wait for 2 replications to be 'Idle', On .NET they may not be immediately available via _active_tasks client.wait_for_replication_status_idle(ls_url, repl_one) client.wait_for_replication_status_idle(ls_url, repl_two) replications = client.get_replications(ls_url) assert len(replications) == 2, "2 replications (push / pull should be running)" num_docs_pushed = 100 # Sanity test docs ls_docs = client.add_docs(url=ls_url, db=ls_db, number=num_docs_pushed, id_prefix="ls_doc", channels=["ABC"]) assert len(ls_docs) == num_docs_pushed sg_docs = client.add_docs(url=sg_url, db=sg_db, number=num_docs_pushed, id_prefix="sg_doc", auth=session, channels=["ABC"]) assert len(sg_docs) == num_docs_pushed all_docs = client.merge(ls_docs, sg_docs) log_info(all_docs) client.verify_docs_present(url=sg_admin_url, db=sg_db, expected_docs=all_docs) client.verify_docs_present(url=ls_url, db=ls_db, expected_docs=all_docs) # GET from session endpoint /{db}/_session/{session-id} session = client.get_session(url=sg_admin_url, db=sg_db, session_id=session_id) assert len(session["userCtx"]["channels"]) == 2, "There should be only 2 channels for the user" assert "ABC" in session["userCtx"]["channels"], "The channel info should contain 'ABC'" assert session["userCtx"]["name"] == "user_1", "The user should have the name 'user_1'" assert len(session["authentication_handlers"]) == 2, "There should be 2 authentication_handlers" assert "default" in session["authentication_handlers"], "Did not find 'default' in 
authentication_headers" assert "cookie" in session["authentication_handlers"], "Did not find 'cookie' in authentication_headers" log_info("SESSIONs: {}".format(session)) # Delete session via sg admin port and _user rest endpoint client.delete_session(url=sg_admin_url, db=sg_db, user_name="user_1", session_id=session_id) # Make sure session is deleted try: session = client.get_session(url=sg_admin_url, db=sg_db, session_id=session_id) except HTTPError as he: expected_error_code = he.response.status_code log_info(expected_error_code) assert expected_error_code == 404, "Expected 404 status, actual {}".format(expected_error_code) # Cancel the replications # Stop repl_one client.stop_replication( url=ls_url, continuous=True, from_db=ls_db, to_url=sg_url, to_db=sg_db, to_auth=session_header ) # Stop repl_two client.stop_replication( url=ls_url, continuous=True, from_url=sg_url, from_db=sg_db, from_auth=session_header, to_db=ls_db, ) client.wait_for_no_replications(ls_url) replications = client.get_replications(ls_url) assert len(replications) == 0, "All replications should be stopped" # Create new session and new push / pull replications session_header = client.create_session_header(url=sg_url, db=sg_db, name="user_1", password="******") # Get session id from header session_parts = re.split("=|;", session_header) session_id = session_parts[1] log_info("{}: {}".format(session_parts[0], session_id)) # Start authenticated push replication repl_one = client.start_replication( url=ls_url, continuous=True, from_db=ls_db, to_url=sg_url, to_db=sg_db, to_auth=session_header ) # Start authenticated pull replication repl_two = client.start_replication( url=ls_url, continuous=True, from_url=sg_url, from_db=sg_db, from_auth=session_header, to_db=ls_db, ) replications = client.get_replications(ls_url) assert len(replications) == 2, "2 replications (push / pull should be running), found: {}".format(2) session = client.get_session(url=sg_admin_url, db=sg_db, session_id=session_id) assert len(session["userCtx"]["channels"]) == 2, "There should be only 2 channels for the user" assert "ABC" in session["userCtx"]["channels"], "The channel info should contain 'ABC'" assert session["userCtx"]["name"] == "user_1", "The user should have the name 'user_1'" assert len(session["authentication_handlers"]) == 2, "There should be 2 authentication_handlers" assert "default" in session["authentication_handlers"], "Did not find 'default' in authentication_headers" assert "cookie" in session["authentication_handlers"], "Did not find 'cookie' in authentication_headers" log_info("SESSIONs: {}".format(session)) # Delete session via sg admin port and db rest endpoint client.delete_session(url=sg_admin_url, db=sg_db, session_id=session_id) # Make sure session is deleted try: session = client.get_session(url=sg_admin_url, db=sg_db, session_id=session_id) except HTTPError as he: expected_error_code = he.response.status_code log_info(expected_error_code) assert expected_error_code == 404, "Expected 404 status, actual {}".format(expected_error_code)
def test_peer_2_peer_sanity(setup_p2p_test): """ 1. Sanity P2P Scenario 2. Launch LiteServ 1 and LiteServ 2 3. Create a database on each LiteServ 4. Start continuous push pull replication from each db to the other 5. Add docs to each db 6. Verify the docs show up at each db 7. Verify the docs show up in the database's changes feed. """ ls_url_one = setup_p2p_test["ls_url_one"] ls_url_two = setup_p2p_test["ls_url_two"] num_docs_per_db = 1000 log_info("ls_url_one: {}".format(ls_url_one)) log_info("ls_url_two: {}".format(ls_url_two)) client = MobileRestClient() log_info("Creating databases") ls_db1 = client.create_database(url=ls_url_one, name="ls_db1") ls_db2 = client.create_database(url=ls_url_two, name="ls_db2") # Setup continuous push / pull replication from LiteServ 1 ls_db1 to LiteServ 2 ls_db2 repl_one = client.start_replication( url=ls_url_one, continuous=True, from_db=ls_db1, to_url=ls_url_two, to_db=ls_db2 ) repl_two = client.start_replication( url=ls_url_one, continuous=True, from_url=ls_url_two, from_db=ls_db2, to_db=ls_db1 ) # Setup continuous push / pull replication from LiteServ 2 ls_db2 to LiteServ 1 ls_db1 repl_three = client.start_replication( url=ls_url_two, continuous=True, from_db=ls_db2, to_url=ls_url_one, to_db=ls_db1 ) repl_four = client.start_replication( url=ls_url_two, continuous=True, from_url=ls_url_one, from_db=ls_db1, to_db=ls_db2 ) client.wait_for_replication_status_idle(url=ls_url_one, replication_id=repl_one) client.wait_for_replication_status_idle(url=ls_url_one, replication_id=repl_two) client.wait_for_replication_status_idle(url=ls_url_two, replication_id=repl_three) client.wait_for_replication_status_idle(url=ls_url_two, replication_id=repl_four) ls_url_one_replications = client.get_replications(ls_url_one) assert len(ls_url_one_replications) == 2 ls_url_two_replications = client.get_replications(ls_url_two) assert len(ls_url_two_replications) == 2 ls_db1_docs = client.add_docs(url=ls_url_one, db=ls_db1, number=num_docs_per_db, id_prefix="test_ls_db1") assert len(ls_db1_docs) == num_docs_per_db ls_db2_docs = client.add_docs(url=ls_url_two, db=ls_db2, number=num_docs_per_db, id_prefix="test_ls_db2") assert len(ls_db2_docs) == num_docs_per_db all_docs = client.merge(ls_db1_docs, ls_db2_docs) assert len(all_docs) == 2000 client.verify_docs_present(url=ls_url_one, db=ls_db1, expected_docs=all_docs) client.verify_docs_present(url=ls_url_two, db=ls_db2, expected_docs=all_docs) client.verify_docs_in_changes(url=ls_url_one, db=ls_db1, expected_docs=all_docs) client.verify_docs_in_changes(url=ls_url_two, db=ls_db2, expected_docs=all_docs)
def test_backfill_channels_oneshot_changes(params_from_base_test_setup, sg_conf_name, grant_type): cluster_config = params_from_base_test_setup["cluster_config"] topology = params_from_base_test_setup["cluster_topology"] mode = params_from_base_test_setup["mode"] sg_url = topology["sync_gateways"][0]["public"] sg_admin_url = topology["sync_gateways"][0]["admin"] sg_db = "db" log_info("grant_type: {}".format(grant_type)) sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode) cluster = Cluster(cluster_config) cluster.reset(sg_conf) client = MobileRestClient() admin_user_info = userinfo.UserInfo("admin", "pass", channels=["A"], roles=[]) user_b_user_info = userinfo.UserInfo("USER_B", "pass", channels=["B"], roles=[]) # Create users / sessions client.create_user(url=sg_admin_url, db=sg_db, name=admin_user_info.name, password=admin_user_info.password, channels=admin_user_info.channels) client.create_user(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, password=user_b_user_info.password, channels=user_b_user_info.channels) admin_session = client.create_session(url=sg_admin_url, db=sg_db, name=admin_user_info.name, password=admin_user_info.password) user_b_session = client.create_session(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, password=user_b_user_info.password) # Create 50 "A" channel docs a_docs = client.add_docs(url=sg_url, db=sg_db, number=50, id_prefix=None, auth=admin_session, channels=["A"]) assert len(a_docs) == 50 b_docs = client.add_docs(url=sg_url, db=sg_db, number=1, id_prefix="b_doc", auth=user_b_session, channels=["B"]) assert len(b_docs) == 1 user_doc = {"id": "_user/USER_B", "rev": None} b_docs.append(user_doc) # Loop until user_b sees b_doc_0 doc and _user/USER_B doc client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=b_docs, auth=user_b_session, strict=True) # Get last_seq for user_b user_b_changes = client.get_changes(url=sg_url, db=sg_db, since=0, auth=user_b_session, feed="normal") # Grant access to channel "A" if grant_type == "CHANNEL-REST": log_info("Granting user access to channel A via Admin REST user update") # Grant via update to user in Admin API client.update_user(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, channels=["A", "B"]) elif grant_type == "CHANNEL-SYNC": log_info("Granting user access to channel A via sync function access()") # Grant via access() in sync_function, then id 'channel_access' will trigger an access(doc.users, doc.channels) access_doc = document.create_doc("channel_access", channels=["A"]) access_doc["users"] = ["USER_B"] client.add_doc(url=sg_url, db=sg_db, doc=access_doc, auth=admin_session) elif grant_type == "ROLE-REST": log_info("Granting user access to channel A via Admin REST role grant") # Create role with channel A client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"]) client.update_user(url=sg_admin_url, db=sg_db, name="USER_B", roles=["channel-A-role"]) elif grant_type == "ROLE-SYNC": log_info("Granting user access to channel A via sync function role() grant") # Create role with channel A client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"]) # Grant via role() in sync_function, then id 'role_access' will trigger a role(doc.users, doc.roles) role_access_doc = document.create_doc("role_access") role_access_doc["users"] = ["USER_B"] role_access_doc["roles"] = ["role:channel-A-role"] client.add_doc(sg_url, db=sg_db, doc=role_access_doc, auth=admin_session) else: pytest.fail("Unsupported grant_type!!!!")
user_b_changes_after_grant = client.get_changes(url=sg_url, db=sg_db, since=user_b_changes["last_seq"], auth=user_b_session, feed="normal") # User B should have received 51 docs (a_docs + 1 _user/USER_B doc) if a REST grant or 50 changes if the grant # is via the sync function changes_results = user_b_changes_after_grant["results"] assert 50 <= len(changes_results) <= 51 # Create a dictionary of id/rev pairs for all the docs that are not "_user/" docs from changes ids_and_revs_from_user_changes = { change["id"]: change["changes"][0]["rev"] for change in changes_results if not change["id"].startswith("_user/") } assert len(ids_and_revs_from_user_changes) == 50 # Create a dictionary of id/rev pairs for all of the channel A docs ids_and_revs_from_a_docs = {doc["id"]: doc["rev"] for doc in a_docs} assert len(ids_and_revs_from_a_docs) == 50 # Check that the changes and the a_docs are identical in id and rev assert ids_and_revs_from_user_changes == ids_and_revs_from_a_docs # Get changes from last_seq of the changes request after the grant. There should be no new changes user_b_changes = client.get_changes(url=sg_url, db=sg_db, since=user_b_changes_after_grant["last_seq"], auth=user_b_session, feed="normal") assert len(user_b_changes["results"]) == 0
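# The CHANNEL-SYNC and ROLE-SYNC branches depend on the database's sync
# function calling access()/role() when the specially-named docs arrive. A
# sketch of a config fragment that would support the 'channel_access' doc used
# above (the sync function is inlined as a string, as in Sync Gateway JSON
# configs; the surrounding config keys are illustrative):
sg_db_config_fragment = {
    "db": {
        "sync": """
            function (doc, oldDoc) {
                if (doc._id == "channel_access") {
                    // grant each user in doc.users access to doc.channels
                    access(doc.users, doc.channels);
                }
                channel(doc.channels);
            }
        """
    }
}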
def test_backfill_channels_oneshot_limit_changes(params_from_base_test_setup, sg_conf_name, grant_type): """ Test that checks that docs are backfilled for one shot changes with limit for a access grant (via REST or SYNC) CHANNEL-REST = Channel is granted to user via REST CHANNEL-SYNC = Channel is granted to user via sync function access() ROLE-REST = Role is granted to user via REST ROLE-SYNC = Role is granted to user via sync function role() CHANNEL-TO-ROLE-REST = Channel is added to existing role via REST CHANNEL-TO-ROLE-SYNC = Channel is added to existing role via sync access() """ cluster_config = params_from_base_test_setup["cluster_config"] topology = params_from_base_test_setup["cluster_topology"] mode = params_from_base_test_setup["mode"] sg_url = topology["sync_gateways"][0]["public"] sg_admin_url = topology["sync_gateways"][0]["admin"] sg_db = "db" log_info("grant_type: {}".format(grant_type)) sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode) cluster = Cluster(cluster_config) cluster.reset(sg_conf) client = MobileRestClient() admin_user_info = userinfo.UserInfo("admin", "pass", channels=["A"], roles=[]) if grant_type == "CHANNEL-TO-ROLE-REST" or grant_type == "CHANNEL-TO-ROLE-SYNC": client.create_role(url=sg_admin_url, db=sg_db, name="empty_role", channels=[]) user_b_user_info = userinfo.UserInfo("USER_B", "pass", channels=["B"], roles=["empty_role"]) else: user_b_user_info = userinfo.UserInfo("USER_B", "pass", channels=["B"], roles=[]) # Create users / sessions client.create_user(url=sg_admin_url, db=sg_db, name=admin_user_info.name, password=admin_user_info.password, channels=admin_user_info.channels) client.create_user(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, password=user_b_user_info.password, channels=user_b_user_info.channels, roles=user_b_user_info.roles) admin_session = client.create_session(url=sg_admin_url, db=sg_db, name=admin_user_info.name, password=admin_user_info.password) user_b_session = client.create_session(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, password=user_b_user_info.password) # Create 50 "A" channel docs a_docs = client.add_docs(url=sg_url, db=sg_db, number=50, id_prefix=None, auth=admin_session, channels=["A"]) assert len(a_docs) == 50 b_docs = client.add_docs(url=sg_url, db=sg_db, number=1, id_prefix="b_doc", auth=user_b_session, channels=["B"]) assert len(b_docs) == 1 # Loop until admin user sees docs in changes client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=a_docs, auth=admin_session) user_doc = {"id": "_user/USER_B", "rev": None} b_docs.append(user_doc) # Loop until user_b sees b_doc_0 doc and _user/USER_B doc client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=b_docs, auth=user_b_session, strict=True) # Get last_seq for user_b user_b_changes = client.get_changes(url=sg_url, db=sg_db, since=0, auth=user_b_session, feed="normal") # Grant access to channel "A" if grant_type == "CHANNEL-REST": log_info( "Granting user access to channel A via Admin REST user update") # Grant via update to user in Admin API client.update_user(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, channels=["A", "B"]) elif grant_type == "CHANNEL-SYNC": log_info("Granting user access to channel A sync function access()") # Grant via access() in sync_function, then id 'channel_access' will trigger an access(doc.users, doc.channels) access_doc = document.create_doc("channel_access", channels=["A"]) access_doc["users"] = ["USER_B"] client.add_doc(url=sg_url, db=sg_db, doc=access_doc, 
auth=admin_session) elif grant_type == "ROLE-REST": log_info("Granting user access to channel A via Admin REST role grant") # Create role with channel A client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"]) client.update_user(url=sg_admin_url, db=sg_db, name="USER_B", roles=["channel-A-role"]) elif grant_type == "ROLE-SYNC": log_info( "Granting user access to channel A via sync function role() grant") # Create role with channel A client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"]) # Grant via role() in sync_function, then id 'role_access' will trigger a role(doc.users, doc.roles) role_access_doc = document.create_doc("role_access") role_access_doc["users"] = ["USER_B"] role_access_doc["roles"] = ["role:channel-A-role"] client.add_doc(sg_url, db=sg_db, doc=role_access_doc, auth=admin_session) elif grant_type == "CHANNEL-TO-ROLE-REST": # Update the empty_role to have channel "A" client.update_role(url=sg_admin_url, db=sg_db, name="empty_role", channels=["A"]) elif grant_type == "CHANNEL-TO-ROLE-SYNC": # Grant empty_role access to channel "A" via the sync function access_doc = document.create_doc("channel_grant_to_role") access_doc["roles"] = ["role:empty_role"] access_doc["channels"] = ["A"] client.add_doc(url=sg_url, db=sg_db, doc=access_doc, auth=admin_session, use_post=True) else: pytest.fail("Unsupported grant_type!!!!") # Create a dictionary keyed on doc id for all of channel A docs ids_and_revs_from_a_docs = {doc["id"]: doc["rev"] for doc in a_docs} assert len(ids_and_revs_from_a_docs.keys()) == 50 log_info("Doing 3, 1 shot changes with limit and last seq!") # Issue 3 oneshot changes with a limit of 20 # Issue one shot changes to make sure access grant is successful, the change may not propagate immediately so retry. num_retries = 3 count = 0 while True: if count == num_retries: raise exceptions.ChangesError( "Didn't get all expected changes before timing out!") user_b_changes_after_grant_one = client.get_changes( url=sg_url, db=sg_db, since=user_b_changes["last_seq"], auth=user_b_session, feed="normal", limit=20) if len(user_b_changes_after_grant_one["results"]) > 0: # Found changes, break out and validate changes!
break time.sleep(1) count += 1 ################# # Changes Req #1 ################# # Expect a user doc in the changes if grant_type == "CHANNEL-REST" or grant_type == "ROLE-REST": ids_and_revs_from_a_docs["_user/USER_B"] = None # User B should have received 20 docs due to limit assert len(user_b_changes_after_grant_one["results"]) == 20 for doc in user_b_changes_after_grant_one["results"]: # cross off keys found from 'a_docs' dictionary del ids_and_revs_from_a_docs[doc["id"]] ################# # Changes Req #2 ################# user_b_changes_after_grant_two = client.get_changes( url=sg_url, db=sg_db, since=user_b_changes_after_grant_one["last_seq"], auth=user_b_session, feed="normal", limit=20) # User B should have received 20 docs due to limit assert len(user_b_changes_after_grant_two["results"]) == 20 for doc in user_b_changes_after_grant_two["results"]: # cross off keys found from 'a_docs' dictionary del ids_and_revs_from_a_docs[doc["id"]] ################# # Changes Req #3 ################# user_b_changes_after_grant_three = client.get_changes( url=sg_url, db=sg_db, since=user_b_changes_after_grant_two["last_seq"], auth=user_b_session, feed="normal", limit=20) # User B should have received 10 docs due to limit or 11 docs with a terminating _user doc # The terminating user doc only happens with a grant via REST if grant_type == "CHANNEL-REST" or grant_type == "ROLE-REST": assert len(user_b_changes_after_grant_three["results"]) == 11 else: assert len(user_b_changes_after_grant_three["results"]) == 10 for doc in user_b_changes_after_grant_three["results"]: # cross off non user doc keys found from 'a_docs' dictionary del ids_and_revs_from_a_docs[doc["id"]] # Make sure all the docs have been crossed out assert len(ids_and_revs_from_a_docs) == 0 ################# # Changes Req #4 ################# user_b_changes_after_grant_four = client.get_changes( url=sg_url, db=sg_db, since=user_b_changes_after_grant_three["last_seq"], auth=user_b_session, feed="normal", limit=20) # Changes should be caught up and there should be no results assert len(user_b_changes_after_grant_four["results"]) == 0
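# The three fixed changes requests above generalize to paging a one-shot
# changes feed with since/limit until a short page comes back. A hedged
# iterator sketch over the same client.get_changes parameters the test uses:
def iter_changes_pages(client, sg_url, sg_db, since, auth, limit=20):
    while True:
        page = client.get_changes(url=sg_url, db=sg_db, since=since,
                                  auth=auth, feed="normal", limit=limit)
        yield page["results"]
        if len(page["results"]) < limit:
            return  # short page: the feed is caught up
        since = page["last_seq"]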
def test_longpoll_awaken_roles(params_from_base_test_setup, sg_conf_name): cluster_conf = params_from_base_test_setup["cluster_config"] cluster_topology = params_from_base_test_setup["cluster_topology"] mode = params_from_base_test_setup["mode"] sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode) sg_admin_url = cluster_topology["sync_gateways"][0]["admin"] sg_url = cluster_topology["sync_gateways"][0]["public"] log_info("sg_conf: {}".format(sg_conf)) log_info("sg_admin_url: {}".format(sg_admin_url)) log_info("sg_url: {}".format(sg_url)) cluster = Cluster(config=cluster_conf) cluster.reset(sg_config_path=sg_conf) admin_role = "admin_role" admin_channel = "admin_channel" admin_user_info = userinfo.UserInfo(name="admin", password="******", channels=[], roles=[admin_role]) adam_user_info = userinfo.UserInfo(name="adam", password="******", channels=[], roles=[]) traun_user_info = userinfo.UserInfo(name="traun", password="******", channels=[], roles=[]) andy_user_info = userinfo.UserInfo(name="andy", password="******", channels=[], roles=[]) sg_db = "db" client = MobileRestClient() # Create a role on sync_gateway client.create_role(url=sg_admin_url, db=sg_db, name=admin_role, channels=[admin_channel]) # Create users with no channels or roles admin_auth = client.create_user(url=sg_admin_url, db=sg_db, name=admin_user_info.name, password=admin_user_info.password, roles=[admin_role]) adam_auth = client.create_user(url=sg_admin_url, db=sg_db, name=adam_user_info.name, password=adam_user_info.password) traun_auth = client.create_user(url=sg_admin_url, db=sg_db, name=traun_user_info.name, password=traun_user_info.password) andy_auth = client.create_user(url=sg_admin_url, db=sg_db, name=andy_user_info.name, password=andy_user_info.password) ################################ # change feed wakes for role add ################################ # Get starting sequence of docs, use the last seq to progress past any _user docs. adam_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=adam_auth) traun_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=traun_auth) andy_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=andy_auth) # Add doc with channel associated with the admin role admin_doc = client.add_docs(url=sg_url, db=sg_db, number=1, id_prefix="admin_doc", auth=admin_auth, channels=[admin_channel]) client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=admin_doc, auth=admin_auth) with concurrent.futures.ProcessPoolExecutor() as ex: # Start changes feed for 3 users from latest last_seq adam_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=adam_changes["last_seq"], timeout=10, auth=adam_auth) traun_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=traun_changes["last_seq"], timeout=10, auth=traun_auth) andy_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=andy_changes["last_seq"], timeout=10, auth=andy_auth) # Wait for changes feed to notice there are no changes and enter wait. 
2 seconds should be more than enough time.sleep(2) # Make sure the changes future is still running and has not exited due to any new changes, the feed should be caught up # and waiting assert not adam_changes_task.done() assert not traun_changes_task.done() assert not andy_changes_task.done() adam_auth = client.update_user(url=sg_admin_url, db=sg_db, name=adam_user_info.name, password=adam_user_info.password, roles=[admin_role]) traun_auth = client.update_user(url=sg_admin_url, db=sg_db, name=traun_user_info.name, password=traun_user_info.password, roles=[admin_role]) andy_auth = client.update_user(url=sg_admin_url, db=sg_db, name=andy_user_info.name, password=andy_user_info.password, roles=[admin_role]) adam_changes = adam_changes_task.result() assert 1 <= len(adam_changes["results"]) <= 2 assert adam_changes["results"][0]["id"] == "admin_doc_0" or adam_changes["results"][0]["id"] == "_user/adam" traun_changes = traun_changes_task.result() assert 1 <= len(traun_changes["results"]) <= 2 assert traun_changes["results"][0]["id"] == "admin_doc_0" or traun_changes["results"][0]["id"] == "_user/traun" andy_changes = andy_changes_task.result() assert 1 <= len(andy_changes["results"]) <= 2 assert andy_changes["results"][0]["id"] == "admin_doc_0" or andy_changes["results"][0]["id"] == "_user/andy" # Check that the user docs all show up in changes feed client.verify_doc_id_in_changes(url=sg_url, db=sg_db, expected_doc_id="_user/adam", auth=adam_auth) client.verify_doc_id_in_changes(url=sg_url, db=sg_db, expected_doc_id="_user/traun", auth=traun_auth) client.verify_doc_id_in_changes(url=sg_url, db=sg_db, expected_doc_id="_user/andy", auth=andy_auth) # Check that the admin doc made it to all the changes feeds client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=admin_doc, auth=adam_auth) client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=admin_doc, auth=traun_auth) client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=admin_doc, auth=andy_auth) # At this point, each user should have a changes feed that is caught up for the next section ########################################### # change feed wakes for channel add to role ########################################### abc_channel = "ABC" abc_pusher_info = userinfo.UserInfo(name="abc_pusher", password="******", channels=[abc_channel], roles=[]) abc_pusher_auth = client.create_user(url=sg_admin_url, db=sg_db, name=abc_pusher_info.name, password=abc_pusher_info.password, channels=abc_pusher_info.channels) # Add doc with ABC channel client.add_docs(url=sg_url, db=sg_db, number=1, id_prefix="abc_doc", auth=abc_pusher_auth, channels=[abc_channel]) # Get latest last_seq for next test section adam_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=adam_auth) traun_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=traun_auth) andy_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=andy_auth) with concurrent.futures.ProcessPoolExecutor() as ex: # Start changes feed for 3 users from latest last_seq adam_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=adam_changes["last_seq"], timeout=10, auth=adam_auth) traun_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=traun_changes["last_seq"], timeout=10, auth=traun_auth) andy_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=andy_changes["last_seq"], timeout=10, auth=andy_auth) # Wait for changes feed to notice there are no 
changes and enter wait. 2 seconds should be more than enough time.sleep(2) # Make sure the changes future is still running and has not exited due to any new changes, the feed should be caught up # and waiting assert not adam_changes_task.done() assert not traun_changes_task.done() assert not andy_changes_task.done() # Update admin role to include ABC channel # Since adam, traun, and andy are assigned to that role, they should wake up and get the 'abc_doc_0' doc client.update_role(url=sg_admin_url, db=sg_db, name=admin_role, channels=[admin_channel, abc_channel]) adam_changes = adam_changes_task.result() assert len(adam_changes["results"]) == 1 assert adam_changes["results"][0]["id"] == "abc_doc_0" traun_changes = traun_changes_task.result() assert len(traun_changes["results"]) == 1 assert traun_changes["results"][0]["id"] == "abc_doc_0" andy_changes = andy_changes_task.result() assert len(andy_changes["results"]) == 1 assert andy_changes["results"][0]["id"] == "abc_doc_0"
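# The executor blocks above hinge on longpoll semantics: a /_changes request
# with feed=longpoll and a caught-up since value blocks server-side until a
# new change (here, the role/channel grant) arrives or the timeout elapses. A
# minimal sketch of one such blocking request, assuming the standard longpoll
# parameters:
import requests

def longpoll_changes(sg_url, db, since, auth=None, timeout_ms=10000):
    resp = requests.get(
        "{}/{}/_changes".format(sg_url, db),
        params={"feed": "longpoll", "since": since, "timeout": timeout_ms},
        auth=auth,  # basic auth tuple; the tests pass a session instead
    )
    resp.raise_for_status()
    return resp.json()  # returns as soon as a change wakes the feed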
def test_server_goes_down_sanity(params_from_base_test_setup): """ 1. Start with a two node couchbase server cluster 2. Start adding docs 3. Kill one of the server nodes and signal completion 4. Stop adding docs 5. Verify that the expected docs are present and in the changes feed. 6. Start server again and add to cluster """ cluster_config = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] sg_version = get_sg_version(cluster_config) if compare_versions(sg_version, '1.5') < 0: pytest.skip("This test needs multiple URLs in the SG config, not supported by SG < 1.5") cluster_helper = ClusterKeywords() sg_conf_name = "sync_gateway_default_functional_tests" sg_conf_path = sync_gateway_config_path_for_mode(sg_conf_name, mode) cluster_helper.reset_cluster(cluster_config=cluster_config, sync_gateway_config=sg_conf_path) topology = cluster_helper.get_cluster_topology(cluster_config) admin_sg = topology["sync_gateways"][0]["admin"] sg_url = topology["sync_gateways"][0]["public"] couchbase_servers = topology["couchbase_servers"] cbs_one_url = couchbase_servers[0] cbs_two_url = couchbase_servers[1] log_info("Running: 'test_server_goes_down_sanity'") log_info("cluster_config: {}".format(cluster_config)) log_info("admin_sg: {}".format(admin_sg)) log_info("sg_url: {}".format(sg_url)) log_info("cbs_one_url: {}".format(cbs_one_url)) log_info("cbs_two_url: {}".format(cbs_two_url)) sg_db = "db" num_docs = 100 sg_user_name = "seth" sg_user_password = "******" channels = ["ABC", "CBS"] client = MobileRestClient() main_server = couchbaseserver.CouchbaseServer(cbs_one_url) flakey_server = couchbaseserver.CouchbaseServer(cbs_two_url) client.create_user(admin_sg, sg_db, sg_user_name, sg_user_password, channels=channels) session = client.create_session(admin_sg, sg_db, sg_user_name) # Stop second server flakey_server.stop() # Try to add 100 docs in a loop until all succeed; if they never do, fail with a timeout errors = num_docs # Wait 30 seconds for auto failover # (Minimum value suggested - http://docs.couchbase.com/admin/admin/Tasks/tasks-nodeFailover.html) # + 15 seconds to add docs timeout = 45 start = time.time() successful_add = False while not successful_add: # Fail the test if all docs do not succeed before the timeout if (time.time() - start) > timeout: # Bring server back up before failing the test flakey_server.start() main_server.rebalance_in(couchbase_servers, flakey_server) raise TimeoutError("Failed to successfully put docs before timeout") try: docs = client.add_docs(url=sg_url, db=sg_db, number=num_docs, id_prefix=None, auth=session, channels=channels) # If the above add docs call does not throw, it was a successful add. successful_add = True except requests.exceptions.HTTPError as he: log_info("Failed to add docs: {}".format(he)) log_info("Seeing: {} errors".format(errors)) time.sleep(1) assert len(docs) == 100 client.verify_docs_present(url=sg_url, db=sg_db, expected_docs=docs, auth=session) try: client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=docs, auth=session, polling_interval=5) except keywords.exceptions.TimeoutException: # Timed out verifying docs. Bring server back in to restore topology, then fail # Failing due to https://github.com/couchbase/sync_gateway/issues/2197 flakey_server.start() main_server.recover(flakey_server) main_server.rebalance_in(couchbase_servers, flakey_server) raise keywords.exceptions.TimeoutException("Failed to get all changes") # Test succeeded without timeout, bring server back into topology flakey_server.start() main_server.recover(flakey_server) main_server.rebalance_in(couchbase_servers, flakey_server) log_info("test_server_goes_down_sanity complete!")
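# The add-until-failover loop above is a retry-with-deadline. A sketch of that shape
# (hypothetical helper; assumes add_docs raises requests.exceptions.HTTPError while
# the downed node is still in the cluster, as in the test above):
import time
import requests

def add_docs_with_deadline(client, url, db, number, channels, auth, deadline_secs=45):
    start = time.time()
    while True:
        if (time.time() - start) > deadline_secs:
            raise TimeoutError("Failed to successfully put docs before timeout")
        try:
            # Succeeds once auto failover has removed the downed node
            return client.add_docs(url=url, db=db, number=number, id_prefix=None, auth=auth, channels=channels)
        except requests.exceptions.HTTPError:
            time.sleep(1)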
def test_server_goes_down_rebuild_channels(params_from_base_test_setup): """ 1. Start with a two node couchbase server cluster 2. Start adding docs 3. Kill one of the server nodes and signal completion 4. Stop adding docs 5. Verify that the expected docs are present and in the changes feed. 6. Start server again and add to cluster """ cluster_config = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] sg_version = get_sg_version(cluster_config) if compare_versions(sg_version, '1.5') < 0: pytest.skip("This test needs multiple URLs in the SG config, not supported by SG < 1.5") cluster_helper = ClusterKeywords() sg_conf_name = "sync_gateway_default_functional_tests" sg_conf_path = sync_gateway_config_path_for_mode(sg_conf_name, mode) cluster_helper.reset_cluster(cluster_config=cluster_config, sync_gateway_config=sg_conf_path) topology = cluster_helper.get_cluster_topology(cluster_config) admin_sg = topology["sync_gateways"][0]["admin"] sg_url = topology["sync_gateways"][0]["public"] couchbase_servers = topology["couchbase_servers"] cbs_one_url = couchbase_servers[0] cbs_two_url = couchbase_servers[1] log_info("Running: 'test_server_goes_down_rebuild_channels'") log_info("cluster_config: {}".format(cluster_config)) log_info("admin_sg: {}".format(admin_sg)) log_info("sg_url: {}".format(sg_url)) log_info("cbs_one_url: {}".format(cbs_one_url)) log_info("cbs_two_url: {}".format(cbs_two_url)) sg_db = "db" num_docs = 100 admin_user_info = userinfo.UserInfo( name="admin", password="******", channels=["ABC"], roles=[] ) seth_user_info = userinfo.UserInfo( name="seth", password="******", channels=["ABC"], roles=[] ) client = MobileRestClient() main_server = couchbaseserver.CouchbaseServer(cbs_one_url) flakey_server = couchbaseserver.CouchbaseServer(cbs_two_url) admin_auth = client.create_user( admin_sg, sg_db, admin_user_info.name, admin_user_info.password, channels=admin_user_info.channels ) client.create_user( admin_sg, sg_db, seth_user_info.name, seth_user_info.password, channels=seth_user_info.channels ) seth_session = client.create_session(admin_sg, sg_db, seth_user_info.name) # Allow any user docs to make it to changes initial_changes = client.get_changes(url=sg_url, db=sg_db, since=0, auth=seth_session) # Push docs from admin docs = client.add_docs( url=sg_url, db=sg_db, number=num_docs, id_prefix=None, channels=admin_user_info.channels, auth=admin_auth ) assert len(docs) == num_docs client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=docs, auth=seth_session) changes_before_failover = client.get_changes(url=sg_url, db=sg_db, since=initial_changes["last_seq"], auth=seth_session) assert len(changes_before_failover["results"]) == num_docs # Stop server via 'service stop' flakey_server.stop() start = time.time() while True: # Fail the test if all docs do not succeed before the timeout if (time.time() - start) > 60: # Bring server back up before failing the test flakey_server.start() main_server.recover(flakey_server) main_server.rebalance_in(couchbase_servers, flakey_server) raise keywords.exceptions.TimeoutError("Failed to rebuild changes") try: # Poll until failover happens (~30 seconds) client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=docs, auth=seth_session) # Changes requests succeeded, exit loop break except requests.exceptions.HTTPError: # Changes will fail until failover of the down server happens. Wait and try again. log_info("/db/_changes failed due to server down. Retrying ...") time.sleep(1) # Verify no new changes changes = client.get_changes( url=sg_url, db=sg_db, since=changes_before_failover["last_seq"], auth=seth_session, feed="normal" ) assert len(changes["results"]) == 0 # Check that all changes are intact from initial changes request changes = client.get_changes(url=sg_url, db=sg_db, since=initial_changes["last_seq"], auth=seth_session) assert len(changes["results"]) == num_docs # Test succeeded without timeout, bring server back into topology flakey_server.start() main_server.recover(flakey_server) main_server.rebalance_in(couchbase_servers, flakey_server)
def test_remove_add_channels_to_doc(params_from_base_test_setup, sg_conf_name): cluster_config = params_from_base_test_setup["cluster_config"] topology = params_from_base_test_setup["cluster_topology"] mode = params_from_base_test_setup["mode"] sg_url = topology["sync_gateways"][0]["public"] sg_admin_url = topology["sync_gateways"][0]["admin"] sg_db = "db" sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode) cluster = Cluster(cluster_config) cluster.reset(sg_conf) client = MobileRestClient() admin_user_info = userinfo.UserInfo("admin", "pass", channels=["A", "B"], roles=[]) a_user_info = userinfo.UserInfo("a_user", "pass", channels=["A"], roles=[]) admin_user_auth = client.create_user( url=sg_admin_url, db=sg_db, name=admin_user_info.name, password=admin_user_info.password, channels=admin_user_info.channels, ) a_user_auth = client.create_user( url=sg_admin_url, db=sg_db, name=a_user_info.name, password=a_user_info.password, channels=a_user_info.channels ) a_docs = client.add_docs( url=sg_url, db=sg_db, number=50, id_prefix="a_doc", auth=admin_user_auth, channels=admin_user_info.channels ) # Build dictionary of a_docs a_docs_id_rev = {doc["id"]: doc["rev"] for doc in a_docs} assert len(a_docs_id_rev) == 50 # Wait for all docs to show up in changes client.verify_doc_id_in_changes(sg_url, sg_db, expected_doc_id="_user/a_user", auth=a_user_auth) client.verify_docs_in_changes(sg_url, sg_db, expected_docs=a_docs, auth=a_user_auth) # Get changes for 'a_user' a_user_changes = client.get_changes(url=sg_url, db=sg_db, since=0, auth=a_user_auth, feed="normal") # 'a_user' should get 50 'a_doc_*' docs and 1 '_user/a_user' doc assert len(a_user_changes["results"]) == 51 ########################### # Remove Channels from doc ########################### # Copy a_docs_id_rev to a dictionary to scratch off values remove_docs_scratch_off = a_docs_id_rev.copy() assert len(remove_docs_scratch_off) == 50 # Use admin user to update the docs to remove 'A' from the channels property on the doc and add 'B' client.update_docs(url=sg_url, db=sg_db, docs=a_docs, number_updates=1, auth=admin_user_auth, channels=["B"]) # Longpoll loop is required due to the delay before changes propagate to the client changes_timeout = 10 start = time.time() last_seq = a_user_changes["last_seq"] while True: # If it takes longer than 10 seconds, fail the test if time.time() - start > changes_timeout: raise keywords.exceptions.TimeoutException("Could not find all expected docs in changes feed") # We found everything, exit loop! if remove_docs_scratch_off == {}: log_info("All expected docs found to be removed") break # Get changes for 'a_user' from last_seq a_user_changes = client.get_changes(url=sg_url, db=sg_db, since=last_seq, auth=a_user_auth, timeout=10) assert len(a_user_changes["results"]) > 0 # Loop over changes found and perform the following # 1. Check that the doc is flagged with 'removed' # 2. Cross off the doc from the 'remove_docs_scratch_off' for change in a_user_changes["results"]: assert change["removed"] == ["A"] assert change["changes"][0]["rev"].startswith("2-") # This will blow up if any change is not found in that dictionary del remove_docs_scratch_off[change["id"]] # Update last_seq last_seq = a_user_changes["last_seq"] # Issue changes request from 'last_seq' and verify that the changes are up to date and return no results a_user_changes = client.get_changes(url=sg_url, db=sg_db, since=last_seq, auth=a_user_auth, feed="normal") assert len(a_user_changes["results"]) == 0 ######################### # Add Channels to doc ######################### # Copy the a_docs_id_rev dictionary for scratching off add_docs_scratch_off = a_docs_id_rev.copy() assert len(add_docs_scratch_off) == 50 # Use admin user to update the docs to add ['A'] back to document channels client.update_docs(url=sg_url, db=sg_db, docs=a_docs, number_updates=1, auth=admin_user_auth, channels=["A"]) # Longpoll loop is required due to the delay before changes propagate to the client changes_timeout = 10 start = time.time() last_seq = a_user_changes["last_seq"] while True: # If it takes longer than 10 seconds, fail the test if time.time() - start > changes_timeout: raise keywords.exceptions.TimeoutException("Could not find all expected docs in changes feed") # We found everything, exit loop! if add_docs_scratch_off == {}: log_info("All expected docs found to be re-added") break # Get changes for 'a_user' from last_seq a_user_changes = client.get_changes(url=sg_url, db=sg_db, since=last_seq, auth=a_user_auth, timeout=10) assert len(a_user_changes["results"]) > 0 # Loop over changes found and perform the following # 1. Check that the doc has a 3rd gen rev prefix # 2. Cross off the doc from the 'add_docs_scratch_off' for change in a_user_changes["results"]: assert change["changes"][0]["rev"].startswith("3-") # This will blow up if any change is not found in that dictionary del add_docs_scratch_off[change["id"]] # Update last_seq last_seq = a_user_changes["last_seq"] # Issue changes request from 'last_seq' and verify that the changes are up to date and return no results a_user_changes = client.get_changes(url=sg_url, db=sg_db, since=last_seq, auth=a_user_auth, feed="normal") assert len(a_user_changes["results"]) == 0
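# Both sections above use a 'scratch-off' dictionary: start from {doc_id: rev}, delete
# ids as changes arrive, and stop when it is empty. A sketch of that loop (hypothetical
# helper; same longpoll get_changes call as the test):
import time

def scratch_off_changes(client, url, db, auth, ids_to_find, last_seq, deadline_secs=10):
    scratch = dict(ids_to_find)
    start = time.time()
    while scratch:
        if (time.time() - start) > deadline_secs:
            raise TimeoutError("Could not find all expected docs in changes feed")
        changes = client.get_changes(url=url, db=db, since=last_seq, auth=auth, timeout=10)
        for change in changes["results"]:
            # KeyError here means an unexpected or duplicate change arrived
            del scratch[change["id"]]
        last_seq = changes["last_seq"]
    return last_seq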
def test_backfill_channels_oneshot_changes(params_from_base_test_setup, sg_conf_name, grant_type): """ Test that checks that docs are backfilled for one-shot changes for an access grant (via REST or SYNC) CHANNEL-REST = Channel is granted to user via REST CHANNEL-SYNC = Channel is granted to user via sync function access() ROLE-REST = Role is granted to user via REST ROLE-SYNC = Role is granted to user via sync function role() CHANNEL-TO-ROLE-REST = Channel is added to existing role via REST CHANNEL-TO-ROLE-SYNC = Channel is added to existing role via sync access() """ cluster_config = params_from_base_test_setup["cluster_config"] topology = params_from_base_test_setup["cluster_topology"] mode = params_from_base_test_setup["mode"] sg_url = topology["sync_gateways"][0]["public"] sg_admin_url = topology["sync_gateways"][0]["admin"] sg_db = "db" log_info("grant_type: {}".format(grant_type)) sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode) cluster = Cluster(cluster_config) cluster.reset(sg_conf) client = MobileRestClient() admin_user_info = userinfo.UserInfo("admin", "pass", channels=["A"], roles=[]) if grant_type == "CHANNEL-TO-ROLE-REST" or grant_type == "CHANNEL-TO-ROLE-SYNC": client.create_role(url=sg_admin_url, db=sg_db, name="empty_role", channels=[]) user_b_user_info = userinfo.UserInfo("USER_B", "pass", channels=["B"], roles=["empty_role"]) else: user_b_user_info = userinfo.UserInfo("USER_B", "pass", channels=["B"], roles=[]) # Create users / sessions client.create_user(url=sg_admin_url, db=sg_db, name=admin_user_info.name, password=admin_user_info.password, channels=admin_user_info.channels) client.create_user(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, password=user_b_user_info.password, channels=user_b_user_info.channels, roles=user_b_user_info.roles) admin_session = client.create_session(url=sg_admin_url, db=sg_db, name=admin_user_info.name, password=admin_user_info.password) user_b_session = client.create_session(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, password=user_b_user_info.password) # Create 50 "A" channel docs a_docs = client.add_docs(url=sg_url, db=sg_db, number=50, id_prefix=None, auth=admin_session, channels=["A"]) assert len(a_docs) == 50 b_docs = client.add_docs(url=sg_url, db=sg_db, number=1, id_prefix="b_doc", auth=user_b_session, channels=["B"]) assert len(b_docs) == 1 user_doc = {"id": "_user/USER_B", "rev": None} b_docs.append(user_doc) # Loop until admin user sees docs in changes client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=a_docs, auth=admin_session) # Loop until user_b sees b_doc_0 doc and _user/USER_B doc client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=b_docs, auth=user_b_session, strict=True) # Get last_seq for user_b user_b_changes = client.get_changes(url=sg_url, db=sg_db, since=0, auth=user_b_session, feed="normal") # Grant access to channel "A" if grant_type == "CHANNEL-REST": log_info("Granting user access to channel A via Admin REST user update") # Grant via update to user in Admin API client.update_user(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, channels=["A", "B"]) elif grant_type == "CHANNEL-SYNC": log_info("Granting user access to channel A via sync function access()") # Grant via access() in sync_function, then the doc id 'channel_access' will trigger an access(doc.users, doc.channels) access_doc = document.create_doc("channel_access", channels=["A"]) access_doc["users"] = ["USER_B"] client.add_doc(url=sg_url, db=sg_db, doc=access_doc, auth=admin_session) elif grant_type == "ROLE-REST": log_info("Granting user access to channel A via Admin REST role grant") # Create role with channel A client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"]) client.update_user(url=sg_admin_url, db=sg_db, name="USER_B", channels=["B"], roles=["channel-A-role"]) elif grant_type == "ROLE-SYNC": log_info("Granting user access to channel A via sync function role() grant") # Create role with channel A client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"]) # Grant via role() in sync_function, then the doc id 'role_access' will trigger a role(doc.users, doc.roles) role_access_doc = document.create_doc("role_access") role_access_doc["users"] = ["USER_B"] role_access_doc["roles"] = ["role:channel-A-role"] client.add_doc(sg_url, db=sg_db, doc=role_access_doc, auth=admin_session) elif grant_type == "CHANNEL-TO-ROLE-REST": # Update the empty_role to have channel "A" client.update_role(url=sg_admin_url, db=sg_db, name="empty_role", channels=["A"]) elif grant_type == "CHANNEL-TO-ROLE-SYNC": # Grant empty_role access to channel "A" via sync function access_doc = document.create_doc("channel_grant_to_role") access_doc["roles"] = ["role:empty_role"] access_doc["channels"] = ["A"] client.add_doc(url=sg_url, db=sg_db, doc=access_doc, auth=admin_session, use_post=True) else: pytest.fail("Unsupported grant_type!!!!") # Issue one-shot changes to make sure the access grant is successful; the change may not propagate immediately, so retry. num_retries = 3 count = 0 while True: if count == num_retries: raise exceptions.ChangesError("Didn't get all expected changes before timing out!") user_b_changes_after_grant = client.get_changes(url=sg_url, db=sg_db, since=user_b_changes["last_seq"], auth=user_b_session, feed="normal") if len(user_b_changes_after_grant["results"]) > 0: # Found changes, break out and validate changes! break time.sleep(1) count += 1 # User B should have received 51 docs (a_docs + 1 _user/USER_B doc) if a REST grant or 50 changes if the grant # is via the sync function changes_results = user_b_changes_after_grant["results"] assert 50 <= len(changes_results) <= 51 # Create a dictionary of id/rev pairs of all the docs that are not "_user/" docs from changes ids_and_revs_from_user_changes = { change["id"]: change["changes"][0]["rev"] for change in changes_results if not change["id"].startswith("_user/") } assert len(ids_and_revs_from_user_changes) == 50 # Create a dictionary of id/rev pairs of all of the channel A docs ids_and_revs_from_a_docs = {doc["id"]: doc["rev"] for doc in a_docs} assert len(ids_and_revs_from_a_docs) == 50 # Check that the changes and the a_docs are identical in id and rev assert ids_and_revs_from_user_changes == ids_and_revs_from_a_docs # Get changes from last_seq of the changes request after the grant. There should be no new changes user_b_changes = client.get_changes(url=sg_url, db=sg_db, since=user_b_changes_after_grant["last_seq"], auth=user_b_session, feed="normal") assert len(user_b_changes["results"]) == 0
def test_upgrade_cbl(setup_client_syncgateway_test): """ 1. Install a previous version of Couchbase Lite. 2. Create docs in the lite. 3. Upgrade to the latest version of Couchbase Lite. 4. Verify docs still exist and are accessible """ sg_db = "db" ls_db = "ls_db" channels = ["auto"] num_docs = 30000 cluster_config = setup_client_syncgateway_test["cluster_config"] sg_mode = setup_client_syncgateway_test["sg_mode"] ls_url = setup_client_syncgateway_test["ls_url"] sg_one_admin = setup_client_syncgateway_test["sg_admin_url"] sg_one_public = setup_client_syncgateway_test["sg_url"] liteserv = setup_client_syncgateway_test["liteserv"] device_enabled = setup_client_syncgateway_test["device_enabled"] liteserv_platform = setup_client_syncgateway_test["liteserv_platform"] liteserv_version = setup_client_syncgateway_test["liteserv_version"] if liteserv_platform.lower() == "android" or liteserv_platform.lower() == "net-msft" or device_enabled: # There is a signature match issue on Android, and older iOS versions do not have an app for devices pytest.skip('upgrading the LiteServ app does not work on Android and there is no app for iOS devices' + ' for the older version, so skipping the test') sg_config = sync_gateway_config_path_for_mode("listener_tests/listener_tests", sg_mode) c = cluster.Cluster(config=cluster_config) c.reset(sg_config_path=sg_config) log_info("ls_url: {}".format(ls_url)) log_info("sg_one_admin: {}".format(sg_one_admin)) log_info("sg_one_public: {}".format(sg_one_public)) test_name = "test_upgrade" # Downloading 1.4.0-3; we are not parametrizing the older version here as that is not going to work, # so hardcoding it now rather than adding a new parameter log_info("Downloading older version of LiteServ ...") liteserv.download(version_build="1.4.0-3") # Install LiteServ if device_enabled and liteserv_platform == "ios": liteserv.stop() liteserv.install_device() ls_url = liteserv.start_device("{}/logs/{}-{}-{}.txt".format(RESULTS_DIR, type(liteserv).__name__, test_name, datetime.datetime.now())) else: liteserv.stop() liteserv.install() log_info("Starting LiteServ ...") ls_url = liteserv.start("{}/logs/{}-{}-{}.txt".format(RESULTS_DIR, type(liteserv).__name__, test_name, datetime.datetime.now())) client = MobileRestClient() client.create_user(sg_one_admin, sg_db, "test", password="******", channels=channels) session = client.create_session(sg_one_admin, sg_db, "test") client.create_database(url=ls_url, name=ls_db) # Create 'num_docs' docs on LiteServ docs = client.add_docs(url=ls_url, db=ls_db, number=num_docs, id_prefix="seeded_doc", generator="four_k", channels=channels) assert len(docs) == num_docs client.verify_docs_present(url=ls_url, db=ls_db, expected_docs=docs, timeout=240) with ThreadPoolExecutor(max_workers=2) as tpe: download_cbl_tpe = tpe.submit(download_install, liteserv, liteserv_version) client.start_replication(url=ls_url, continuous=True, from_db=ls_db, to_url=sg_one_admin, to_db=sg_db) download_cbl_tpe.result() log_info("download is done") liteserv.stop() ls_url = liteserv.start("{}/logs/{}-{}-{}.txt".format(RESULTS_DIR, type(liteserv).__name__, test_name, datetime.datetime.now())) client.start_replication(url=ls_url, continuous=True, from_db=ls_db, to_url=sg_one_admin, to_db=sg_db) client.verify_docs_present(url=ls_url, db=ls_db, expected_docs=docs, timeout=240) client.verify_docs_present(url=sg_one_public, db=sg_db, expected_docs=docs, auth=session, timeout=600)
def test_longpoll_awaken_via_sync_role(params_from_base_test_setup, sg_conf_name): """ Test that longpoll changes feed wakes up on role() in sync_function The contrived sync_function below is used: function(doc, oldDoc){ if(doc._id == "role_doc_0") { console.log("granting_access!"); role(["adam", "traun", "andy"], "role:techno"); } channel(doc, doc.channels); } """ cluster_conf = params_from_base_test_setup["cluster_config"] cluster_topology = params_from_base_test_setup["cluster_topology"] mode = params_from_base_test_setup["mode"] sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode) sg_admin_url = cluster_topology["sync_gateways"][0]["admin"] sg_url = cluster_topology["sync_gateways"][0]["public"] log_info("sg_conf: {}".format(sg_conf)) log_info("sg_admin_url: {}".format(sg_admin_url)) log_info("sg_url: {}".format(sg_url)) sg_db = "db" techno_role = "techno" techno_channel = "aphex" cluster = Cluster(config=cluster_conf) cluster.reset(sg_config_path=sg_conf) client = MobileRestClient() client.create_role(url=sg_admin_url, db=sg_db, name=techno_role, channels=[techno_channel]) admin_user_info = userinfo.UserInfo(name="admin", password="******", channels=[], roles=[techno_role]) adam_user_info = userinfo.UserInfo(name="adam", password="******", channels=[], roles=[]) traun_user_info = userinfo.UserInfo(name="traun", password="******", channels=[], roles=[]) andy_user_info = userinfo.UserInfo(name="andy", password="******", channels=[], roles=[]) admin_auth = client.create_user(url=sg_admin_url, db=sg_db, name=admin_user_info.name, password=admin_user_info.password, roles=admin_user_info.roles) adam_auth = client.create_user(url=sg_admin_url, db=sg_db, name=adam_user_info.name, password=adam_user_info.password) traun_auth = client.create_user(url=sg_admin_url, db=sg_db, name=traun_user_info.name, password=traun_user_info.password) andy_auth = client.create_user(url=sg_admin_url, db=sg_db, name=andy_user_info.name, password=andy_user_info.password) client.add_docs(url=sg_url, db=sg_db, number=1, id_prefix="techno_doc", channels=[techno_channel], auth=admin_auth) # Get starting sequence of docs, use the last seq to progress past any _user docs. adam_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=adam_auth) traun_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=traun_auth) andy_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=andy_auth) with concurrent.futures.ProcessPoolExecutor() as ex: # Start changes feed for 3 users from latest last_seq adam_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=adam_changes["last_seq"], timeout=10, auth=adam_auth) traun_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=traun_changes["last_seq"], timeout=10, auth=traun_auth) andy_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=andy_changes["last_seq"], timeout=10, auth=andy_auth) # Wait for changes feed to notice there are no changes and enter wait. 2 seconds should be more than enough time.sleep(2) # Make sure the changes future is still running and has not exited due to any new changes, the feed should be caught up # and waiting assert not adam_changes_task.done() assert not traun_changes_task.done() assert not andy_changes_task.done() # Push a doc with id 'role_doc_0' to trigger the sync function role() grant of 'techno' to adam, traun, and andy client.add_docs(url=sg_url, db=sg_db, number=1, id_prefix="role_doc", channels=[], auth=admin_auth) # Changes feed should wake up with the techno_doc_0 doc adam_changes = adam_changes_task.result() assert len(adam_changes["results"]) == 1 assert adam_changes["results"][0]["id"] == "techno_doc_0" assert adam_changes["results"][0]["changes"][0]["rev"].startswith("1-") traun_changes = traun_changes_task.result() assert len(traun_changes["results"]) == 1 assert traun_changes["results"][0]["id"] == "techno_doc_0" assert traun_changes["results"][0]["changes"][0]["rev"].startswith("1-") andy_changes = andy_changes_task.result() assert len(andy_changes["results"]) == 1 assert andy_changes["results"][0]["id"] == "techno_doc_0" assert andy_changes["results"][0]["changes"][0]["rev"].startswith("1-") # Assert that the changes are caught up and should receive no new changes from last_seq # Test for https://github.com/couchbase/sync_gateway/issues/2186 adam_changes = client.get_changes(url=sg_url, db=sg_db, since=adam_changes["last_seq"], auth=adam_auth, timeout=1) assert len(adam_changes["results"]) == 0 traun_changes = client.get_changes(url=sg_url, db=sg_db, since=traun_changes["last_seq"], auth=traun_auth, timeout=1) assert len(traun_changes["results"]) == 0 andy_changes = client.get_changes(url=sg_url, db=sg_db, since=andy_changes["last_seq"], auth=andy_auth, timeout=1) assert len(andy_changes["results"]) == 0
def test_awaken_backfill_channels_longpoll_changes_with_limit( params_from_base_test_setup, sg_conf_name, grant_type): """ Test that checks that docs are backfilled for logpoll changes with limit for a access grant (via REST or SYNC) CHANNEL-REST = Channel is granted to user via REST CHANNEL-SYNC = Channel is granted to user via sync function access() ROLE-REST = Role is granted to user via REST ROLE-SYNC = Role is granted to user via sync function role() CHANNEL-TO-ROLE-REST = Channel is added to existing role via REST CHANNEL-TO-ROLE-SYNC = Channel is added to existing role via sync access() """ cluster_config = params_from_base_test_setup["cluster_config"] topology = params_from_base_test_setup["cluster_topology"] mode = params_from_base_test_setup["mode"] sg_url = topology["sync_gateways"][0]["public"] sg_admin_url = topology["sync_gateways"][0]["admin"] sg_db = "db" log_info("grant_type: {}".format(grant_type)) sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode) cluster = Cluster(cluster_config) cluster.reset(sg_conf) client = MobileRestClient() admin_user_info = userinfo.UserInfo("admin", "pass", channels=["A"], roles=[]) if grant_type == "CHANNEL-TO-ROLE-REST" or grant_type == "CHANNEL-TO-ROLE-SYNC": client.create_role(url=sg_admin_url, db=sg_db, name="empty_role", channels=[]) user_b_user_info = userinfo.UserInfo("USER_B", "pass", channels=["B"], roles=["empty_role"]) else: user_b_user_info = userinfo.UserInfo("USER_B", "pass", channels=["B"], roles=[]) # Create users / sessions client.create_user( url=sg_admin_url, db=sg_db, name=admin_user_info.name, password=admin_user_info.password, channels=admin_user_info.channels, ) client.create_user(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, password=user_b_user_info.password, channels=user_b_user_info.channels, roles=user_b_user_info.roles) admin_session = client.create_session(url=sg_admin_url, db=sg_db, name=admin_user_info.name, password=admin_user_info.password) user_b_session = client.create_session(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, password=user_b_user_info.password) # Create 50 "A" channel docs a_docs = client.add_docs(url=sg_url, db=sg_db, number=50, id_prefix=None, auth=admin_session, channels=["A"]) assert len(a_docs) == 50 b_docs = client.add_docs(url=sg_url, db=sg_db, number=1, id_prefix="b_doc", auth=user_b_session, channels=["B"]) assert len(b_docs) == 1 user_doc = {"id": "_user/USER_B", "rev": None} b_docs.append(user_doc) # Loop until user_b sees b_doc_0 doc and _user/USER_B doc client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=b_docs, auth=user_b_session, strict=True) # Create a dictionary keyed on doc id for all of channel A docs ids_and_revs_from_a_docs = {doc["id"]: doc["rev"] for doc in a_docs} assert len(ids_and_revs_from_a_docs.keys()) == 50 # Get last_seq for user_b user_b_changes = client.get_changes(url=sg_url, db=sg_db, since=0, auth=user_b_session, feed="normal") with concurrent.futures.ThreadPoolExecutor(max_workers=10) as ex: # Start long poll changes feed. 
changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=user_b_changes["last_seq"], auth=user_b_session, timeout=10, limit=20) # Grant access to channel "A" if grant_type == "CHANNEL-REST": log_info("Granting user access to channel A via Admin REST user update") # Grant via update to user in Admin API client.update_user(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, channels=["A", "B"]) elif grant_type == "CHANNEL-SYNC": log_info("Granting user access to channel A via sync function access()") # Grant via access() in sync_function, then the doc id 'channel_access' will trigger an access(doc.users, doc.channels) access_doc = document.create_doc("channel_access", channels=["A"]) access_doc["users"] = ["USER_B"] client.add_doc(url=sg_url, db=sg_db, doc=access_doc, auth=admin_session) elif grant_type == "ROLE-REST": log_info("Granting user access to channel A via Admin REST role grant") # Create role with channel A client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"]) client.update_user(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, roles=["channel-A-role"]) elif grant_type == "ROLE-SYNC": log_info("Granting user access to channel A via sync function role() grant") # Create role with channel A client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"]) # Grant via role() in sync_function, then the doc id 'role_access' will trigger a role(doc.users, doc.roles) role_access_doc = document.create_doc("role_access") role_access_doc["users"] = ["USER_B"] role_access_doc["roles"] = ["role:channel-A-role"] client.add_doc(sg_url, db=sg_db, doc=role_access_doc, auth=admin_session) elif grant_type == "CHANNEL-TO-ROLE-REST": # Update the empty_role to have channel "A" client.update_role(url=sg_admin_url, db=sg_db, name="empty_role", channels=["A"]) elif grant_type == "CHANNEL-TO-ROLE-SYNC": # Grant empty_role access to channel "A" via sync function access_doc = document.create_doc("channel_grant_to_role") access_doc["roles"] = ["role:empty_role"] access_doc["channels"] = ["A"] client.add_doc(url=sg_url, db=sg_db, doc=access_doc, auth=admin_session, use_post=True) else: pytest.fail("Unsupported grant_type!!!!") # Block on return of longpoll changes, feed should wake up and return 20 results changes = changes_task.result() assert len(changes["results"]) == 20 num_requests = 1 # Append the _user/ doc to the doc scratch pad if a REST grant if grant_type == "CHANNEL-REST" or grant_type == "ROLE-REST": ids_and_revs_from_a_docs["_user/USER_B"] = None # Cross the results off from the 'a_docs' dictionary for doc in changes["results"]: del ids_and_revs_from_a_docs[doc["id"]] # Start looping longpoll changes with limit, cross off changes from dictionary each time one is found # Since 20 changes should be crossed off already, this should execute 2x. log_info("Starting looping longpoll changes with limit!") last_seq = changes["last_seq"] while True: if len(ids_and_revs_from_a_docs.keys()) == 0: log_info("All docs were found! Exiting polling loop") break changes = client.get_changes(url=sg_url, db=sg_db, since=last_seq, auth=user_b_session, limit=20, timeout=10) num_requests += 1 # If there are more than 3 requests, throw an exception. if num_requests == 2: assert len(changes["results"]) == 20 elif num_requests == 3: # This will be 10 or 11 depending on if the _user/ doc is returned if grant_type == "CHANNEL-REST" or grant_type == "ROLE-REST": assert len(changes["results"]) == 11 else: assert len(changes["results"]) == 10 else: raise exceptions.ChangesError("Looping longpoll should only have to perform 3 requests to get all the changes!!") # Cross the results off from the 'a_docs' dictionary. # This will blow up if duplicate docs are sent to changes for doc in changes["results"]: del ids_and_revs_from_a_docs[doc["id"]] last_seq = changes["last_seq"] # Changes after longpoll zero_results = client.get_changes(url=sg_url, db=sg_db, since=last_seq, auth=user_b_session, feed="normal") # Changes should be caught up and there should be no results assert len(zero_results["results"]) == 0
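# With 50 backfilled docs (plus the _user/ doc for REST grants) and limit=20, the feed
# drains in exactly three requests: 20 + 20 + 10 (or 11). A sketch of draining a changes
# feed in limit-sized pages (hypothetical helper; note the terminating empty page costs
# one extra longpoll timeout):
def drain_changes_with_limit(client, url, db, auth, since, limit=20):
    pages = []
    while True:
        changes = client.get_changes(url=url, db=db, since=since, auth=auth, limit=limit, timeout=10)
        if not changes["results"]:
            return pages  # caught up: an empty page ends the drain
        pages.append(changes["results"])
        since = changes["last_seq"]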
def test_backfill_channels_oneshot_limit_changes(params_from_base_test_setup, sg_conf_name, grant_type): cluster_config = params_from_base_test_setup["cluster_config"] topology = params_from_base_test_setup["cluster_topology"] mode = params_from_base_test_setup["mode"] sg_url = topology["sync_gateways"][0]["public"] sg_admin_url = topology["sync_gateways"][0]["admin"] sg_db = "db" log_info("grant_type: {}".format(grant_type)) sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode) cluster = Cluster(cluster_config) cluster.reset(sg_conf) client = MobileRestClient() admin_user_info = userinfo.UserInfo("admin", "pass", channels=["A"], roles=[]) user_b_user_info = userinfo.UserInfo("USER_B", "pass", channels=["B"], roles=[]) # Create users / sessions client.create_user(url=sg_admin_url, db=sg_db, name=admin_user_info.name, password=admin_user_info.password, channels=admin_user_info.channels) client.create_user(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, password=user_b_user_info.password, channels=user_b_user_info.channels) admin_session = client.create_session(url=sg_admin_url, db=sg_db, name=admin_user_info.name, password=admin_user_info.password) user_b_session = client.create_session(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, password=user_b_user_info.password) # Create 50 "A" channel docs a_docs = client.add_docs(url=sg_url, db=sg_db, number=50, id_prefix=None, auth=admin_session, channels=["A"]) assert len(a_docs) == 50 b_docs = client.add_docs(url=sg_url, db=sg_db, number=1, id_prefix="b_doc", auth=user_b_session, channels=["B"]) assert len(b_docs) == 1 user_doc = {"id": "_user/USER_B", "rev": None} b_docs.append(user_doc) # Loop until user_b sees b_doc_0 doc and _user/USER_B doc client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=b_docs, auth=user_b_session, strict=True) # Get last_seq for user_b user_b_changes = client.get_changes(url=sg_url, db=sg_db, since=0, auth=user_b_session, feed="normal") # Grant access to channel "A" if grant_type == "CHANNEL-REST": log_info("Granting user access to channel A via Admin REST user update") # Grant via update to user in Admin API client.update_user(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, channels=["A", "B"]) elif grant_type == "CHANNEL-SYNC": log_info("Granting user access to channel A sync function access()") # Grant via access() in sync_function, then id 'channel_access' will trigger an access(doc.users, doc.channels) access_doc = document.create_doc("channel_access", channels=["A"]) access_doc["users"] = ["USER_B"] client.add_doc(url=sg_url, db=sg_db, doc=access_doc, auth=admin_session) elif grant_type == "ROLE-REST": log_info("Granting user access to channel A via Admin REST role grant") # Create role with channel A client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"]) client.update_user(url=sg_admin_url, db=sg_db, name="USER_B", roles=["channel-A-role"]) elif grant_type == "ROLE-SYNC": log_info("Granting user access to channel A via sync function role() grant") # Create role with channel A client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"]) # Grant via role() in sync_function, then id 'role_access' will trigger an role(doc.users, doc.roles) role_access_doc = document.create_doc("role_access") role_access_doc["users"] = ["USER_B"] role_access_doc["roles"] = ["role:channel-A-role"] client.add_doc(sg_url, db=sg_db, doc=role_access_doc, auth=admin_session) else: pytest.fail("Unsupported grant_type!!!!") # Create a 
dictionary keyed on doc id for all of channel A docs ids_and_revs_from_a_docs = {doc["id"]: doc["rev"] for doc in a_docs} assert len(ids_and_revs_from_a_docs.keys()) == 50 log_info("Doing 3 one-shot changes with limit and last_seq!") # Issue 3 oneshot changes with a limit of 20 ################# # Changes Req #1 ################# user_b_changes_after_grant_one = client.get_changes(url=sg_url, db=sg_db, since=user_b_changes["last_seq"], auth=user_b_session, feed="normal", limit=20) # User B should have received 20 docs due to limit assert len(user_b_changes_after_grant_one["results"]) == 20 for doc in user_b_changes_after_grant_one["results"]: # cross off keys found from 'a_docs' dictionary del ids_and_revs_from_a_docs[doc["id"]] ################# # Changes Req #2 ################# user_b_changes_after_grant_two = client.get_changes(url=sg_url, db=sg_db, since=user_b_changes_after_grant_one["last_seq"], auth=user_b_session, feed="normal", limit=20) # User B should have received 20 docs due to limit assert len(user_b_changes_after_grant_two["results"]) == 20 for doc in user_b_changes_after_grant_two["results"]: # cross off keys found from 'a_docs' dictionary del ids_and_revs_from_a_docs[doc["id"]] ################# # Changes Req #3 ################# user_b_changes_after_grant_three = client.get_changes(url=sg_url, db=sg_db, since=user_b_changes_after_grant_two["last_seq"], auth=user_b_session, feed="normal", limit=20) # User B should have received the remaining 10 docs assert len(user_b_changes_after_grant_three["results"]) == 10 for doc in user_b_changes_after_grant_three["results"]: # cross off keys found from 'a_docs' dictionary del ids_and_revs_from_a_docs[doc["id"]] # Make sure all the docs have been crossed out assert len(ids_and_revs_from_a_docs) == 0 ################# # Changes Req #4 ################# user_b_changes_after_grant_four = client.get_changes(url=sg_url, db=sg_db, since=user_b_changes_after_grant_three["last_seq"], auth=user_b_session, feed="normal", limit=20) # Changes should be caught up and there should be no results assert len(user_b_changes_after_grant_four["results"]) == 0
def test_deleted_docs_from_changes_active_only(params_from_base_test_setup, sg_conf_name): """ https://github.com/couchbase/sync_gateway/issues/2955 1. Create a document 2. Delete the document 3. Restart Sync Gateway (to force rebuild of cache from view) 4. Issue an active_only=true changes request 5. Issue an active_only=false changes request The deleted document was not being included in the result set in step 5. """ cluster_config = params_from_base_test_setup["cluster_config"] topology = params_from_base_test_setup["cluster_topology"] sg_admin_url = topology["sync_gateways"][0]["admin"] sg_db = "db" num_docs = 10 client = MobileRestClient() # Add docs to SG added_docs = client.add_docs(url=sg_admin_url, db=sg_db, number=num_docs, id_prefix="test_changes") # Delete 1 doc doc_id = added_docs[0]["id"] log_info("Deleting {}".format(doc_id)) doc = client.get_doc(url=sg_admin_url, db=sg_db, doc_id=doc_id) doc_rev = doc['_rev'] client.delete_doc(sg_admin_url, sg_db, doc_id, doc_rev) # Restart SG sg_obj = SyncGateway() sg_obj.restart_sync_gateways(cluster_config) # Changes request with active_only=true session = Session() request_url = "{}/{}/_changes?active_only=true".format(sg_admin_url, sg_db) log_info("Issuing changes request {}".format(request_url)) resp = session.get(request_url) resp.raise_for_status() resp_obj = resp.json() log_info("Checking that the deleted doc is not included in the active_only=true changes request") for d in resp_obj["results"]: assert d["id"] != doc_id # Changes request with active_only=false request_url = "{}/{}/_changes?active_only=false".format(sg_admin_url, sg_db) log_info("Issuing changes request {}".format(request_url)) resp = session.get(request_url) resp.raise_for_status() resp_obj = resp.json() log_info("Checking that the deleted doc is included in the active_only=false changes request") doc_found = False for d in resp_obj["results"]: if doc_id != d["id"]: continue assert doc_id == d["id"] assert d["deleted"] doc_found = True break assert doc_found
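# The active_only checks above go straight at the REST API. The same check as a small
# helper (hypothetical; uses requests.Session like the test):
from requests import Session

def deleted_doc_in_changes(sg_admin_url, sg_db, doc_id, active_only):
    resp = Session().get("{}/{}/_changes?active_only={}".format(sg_admin_url, sg_db, str(active_only).lower()))
    resp.raise_for_status()
    # The deleted doc should appear (flagged "deleted") only when active_only=false
    return any(d["id"] == doc_id and d.get("deleted") for d in resp.json()["results"])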
def test_backfill_channels_looping_longpoll_changes(params_from_base_test_setup, sg_conf_name, grant_type): cluster_config = params_from_base_test_setup["cluster_config"] topology = params_from_base_test_setup["cluster_topology"] mode = params_from_base_test_setup["mode"] sg_url = topology["sync_gateways"][0]["public"] sg_admin_url = topology["sync_gateways"][0]["admin"] sg_db = "db" log_info("grant_type: {}".format(grant_type)) sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode) cluster = Cluster(cluster_config) cluster.reset(sg_conf) client = MobileRestClient() admin_user_info = userinfo.UserInfo("admin", "pass", channels=["A"], roles=[]) user_b_user_info = userinfo.UserInfo("USER_B", "pass", channels=["B"], roles=[]) # Create users / sessions client.create_user(url=sg_admin_url, db=sg_db, name=admin_user_info.name, password=admin_user_info.password, channels=admin_user_info.channels) client.create_user(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, password=user_b_user_info.password, channels=user_b_user_info.channels) admin_session = client.create_session(url=sg_admin_url, db=sg_db, name=admin_user_info.name, password=admin_user_info.password) user_b_session = client.create_session(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, password=user_b_user_info.password) # Create 50 "A" channel docs a_docs = client.add_docs(url=sg_url, db=sg_db, number=50, id_prefix=None, auth=admin_session, channels=["A"]) assert len(a_docs) == 50 b_docs = client.add_docs(url=sg_url, db=sg_db, number=1, id_prefix="b_doc", auth=user_b_session, channels=["B"]) assert len(b_docs) == 1 user_doc = {"id": "_user/USER_B", "rev": None} b_docs.append(user_doc) # Loop until user_b sees b_doc_0 doc and _user/USER_B doc client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=b_docs, auth=user_b_session, strict=True) # Create a dictionary keyed on doc id for all of channel A docs ids_and_revs_from_a_docs = {doc["id"]: doc["rev"] for doc in a_docs} assert len(ids_and_revs_from_a_docs.keys()) == 50 # Get last_seq for user_b user_b_changes = client.get_changes(url=sg_url, db=sg_db, since=0, auth=user_b_session, feed="normal") with concurrent.futures.ProcessPoolExecutor() as ex: # Start long poll changes feed. 
changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=user_b_changes["last_seq"], auth=user_b_session, timeout=10, limit=20) # Grant access to channel "A" if grant_type == "CHANNEL-REST": log_info("Granting user access to channel A via Admin REST user update") # Grant via update to user in Admin API client.update_user(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, channels=["A", "B"]) elif grant_type == "CHANNEL-SYNC": log_info("Granting user access to channel A via sync function access()") # Grant via access() in sync_function, then the doc id 'channel_access' will trigger an access(doc.users, doc.channels) access_doc = document.create_doc("channel_access", channels=["A"]) access_doc["users"] = ["USER_B"] client.add_doc(url=sg_url, db=sg_db, doc=access_doc, auth=admin_session) elif grant_type == "ROLE-REST": log_info("Granting user access to channel A via Admin REST role grant") # Create role with channel A client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"]) client.update_user(url=sg_admin_url, db=sg_db, name="USER_B", roles=["channel-A-role"]) elif grant_type == "ROLE-SYNC": log_info("Granting user access to channel A via sync function role() grant") # Create role with channel A client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"]) # Grant via role() in sync_function, then the doc id 'role_access' will trigger a role(doc.users, doc.roles) role_access_doc = document.create_doc("role_access") role_access_doc["users"] = ["USER_B"] role_access_doc["roles"] = ["role:channel-A-role"] client.add_doc(sg_url, db=sg_db, doc=role_access_doc, auth=admin_session) else: pytest.fail("Unsupported grant_type!!!!") # Block on return of longpoll changes, feed should wake up and return 20 results changes = changes_task.result() assert len(changes["results"]) == 20 num_requests = 1 # Cross the results off from the 'a_docs' dictionary for doc in changes["results"]: del ids_and_revs_from_a_docs[doc["id"]] # Start looping longpoll changes with limit, cross off changes from dictionary each time one is found # Since 20 changes should be crossed off already, this should execute 2x. log_info("Starting looping longpoll changes with limit!") last_seq = changes["last_seq"] while True: if len(ids_and_revs_from_a_docs.keys()) == 0: log_info("All docs were found! Exiting polling loop") break changes = client.get_changes(url=sg_url, db=sg_db, since=last_seq, auth=user_b_session, limit=20, timeout=10) num_requests += 1 # If there are more than 3 requests, throw an exception. if num_requests == 2: assert len(changes["results"]) == 20 elif num_requests == 3: # This will be 10 or 11 depending on if the _user/ doc is returned assert 10 <= len(changes["results"]) <= 11 else: raise exceptions.ChangesError("Looping longpoll should only have to perform 3 requests to get all the changes!!") # Cross the results off from the 'a_docs' dictionary. # This will blow up if duplicate docs are sent to changes for doc in changes["results"]: if doc["id"] != "_user/USER_B": del ids_and_revs_from_a_docs[doc["id"]] last_seq = changes["last_seq"] # Changes after longpoll zero_results = client.get_changes(url=sg_url, db=sg_db, since=last_seq, auth=user_b_session, feed="normal") # Changes should be caught up and there should be no results assert len(zero_results["results"]) == 0
def test_take_down_bring_up_sg_accel_validate_cbgt(params_from_base_test_setup, sg_conf): """ Scenario 1 Start with 3 sg_accels Take down 2 sg_accels (block until down -- poll port if needed) Doc adds with uuids (~30 sec for cbgt to reshard) polling loop: wait for all docs to come back over changes feed Call validate pindex with correct number of accels Scenario 2 (Continuation) When bringing up, you'd have to poll the cbgt_cfg until you get the expected number of nodes, then you could validate the pindex with 2 accels """ cluster_conf = params_from_base_test_setup["cluster_config"] log_info("Running 'test_take_down_bring_up_sg_accel_validate_cbgt'") log_info("cluster_conf: {}".format(cluster_conf)) log_info("sg_conf: {}".format(sg_conf)) cluster = Cluster(config=cluster_conf) cluster.reset(sg_config_path=sg_conf) cluster_util = ClusterKeywords() topology = cluster_util.get_cluster_topology(cluster_conf) sg_url = topology["sync_gateways"][0]["public"] sg_admin_url = topology["sync_gateways"][0]["admin"] sg_db = "db" client = MobileRestClient() doc_pusher_user_info = userinfo.UserInfo("doc_pusher", "pass", channels=["A"], roles=[]) doc_pusher_auth = client.create_user( url=sg_admin_url, db=sg_db, name=doc_pusher_user_info.name, password=doc_pusher_user_info.password, channels=doc_pusher_user_info.channels ) log_info("Shutting down sg_accels: [{}, {}]".format(cluster.sg_accels[1], cluster.sg_accels[2])) # Shutdown two accel nodes in parallel with concurrent.futures.ThreadPoolExecutor(max_workers=3) as ex: sg_accel_down_task_1 = ex.submit(cluster.sg_accels[1].stop) sg_accel_down_task_2 = ex.submit(cluster.sg_accels[2].stop) assert sg_accel_down_task_1.result() == 0 assert sg_accel_down_task_2.result() == 0 log_info("Finished taking nodes down!") # It should take some time (~30 seconds) for cbgt to pick up the failing nodes and reshard the pindexes. During # this, add 1000 docs and start a longpoll changes loop to see if those docs make it to the changes feed. # If the reshard is successful they will show up at some point after. If not, the docs will fail to show up. doc_pusher_docs = client.add_docs( url=sg_url, db=sg_db, number=1000, id_prefix=None, auth=doc_pusher_auth, channels=doc_pusher_user_info.channels ) assert len(doc_pusher_docs) == 1000 client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=doc_pusher_docs, auth=doc_pusher_auth, polling_interval=5) # The pindexes should be resharded at this point since all of the changes have shown up assert cluster.validate_cbgt_pindex_distribution(num_running_sg_accels=1) log_info("Start sg_accels: [{}, {}]".format(cluster.sg_accels[1], cluster.sg_accels[2])) # Start the two accel nodes one at a time, validating pindex distribution after each status = cluster.sg_accels[1].start(sg_conf) assert status == 0 # Poll on pindex reshard after bringing the first accel node back (2 running) assert cluster.validate_cbgt_pindex_distribution_retry(num_running_sg_accels=2) status = cluster.sg_accels[2].start(sg_conf) assert status == 0 # Poll on pindex reshard after bringing the second accel node back (3 running) assert cluster.validate_cbgt_pindex_distribution_retry(num_running_sg_accels=3)
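# Bring-up is intentionally one node at a time, validating pindex distribution after
# each. Sketch of that ordering (hypothetical helper; same start / validate calls as
# the test above):
def bring_up_accels_one_at_a_time(cluster, sg_conf, accel_indexes, already_running=1):
    for n, i in enumerate(accel_indexes, start=1):
        assert cluster.sg_accels[i].start(sg_conf) == 0
        # Poll until cbgt has resharded pindexes across all currently running accels
        assert cluster.validate_cbgt_pindex_distribution_retry(num_running_sg_accels=already_running + n)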
def test_initial_pull_replication(setup_client_syncgateway_test, continuous): """ 1. Prepare sync-gateway to have 10000 documents. 2. Create a single shot / continuous pull replicator to pull the docs into a database. 3. Verify that all of the docs get pulled. Referenced issue: couchbase/couchbase-lite-android#955. """ sg_db = "db" ls_db = "ls_db" num_docs = 10000 cluster_config = setup_client_syncgateway_test["cluster_config"] ls_url = setup_client_syncgateway_test["ls_url"] sg_one_admin = setup_client_syncgateway_test["sg_admin_url"] sg_one_public = setup_client_syncgateway_test["sg_url"] sg_helper = SyncGateway() sg_helper.start_sync_gateway( cluster_config=cluster_config, url=sg_one_public, config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS) ) log_info("Running 'test_initial_pull_replication', continuous: {}".format(continuous)) log_info("ls_url: {}".format(ls_url)) log_info("sg_one_admin: {}".format(sg_one_admin)) log_info("sg_one_public: {}".format(sg_one_public)) client = MobileRestClient() client.create_user(sg_one_admin, sg_db, "seth", password="******", channels=["ABC", "NBC"]) session = client.create_session(sg_one_admin, sg_db, "seth") # Create 'num_docs' docs on sync_gateway docs = client.add_docs( url=sg_one_public, db=sg_db, number=num_docs, id_prefix="seeded_doc", generator="four_k", auth=session ) assert len(docs) == num_docs client.create_database(url=ls_url, name=ls_db) # Start the one shot / continuous pull replication repl_id = client.start_replication( url=ls_url, continuous=continuous, from_url=sg_one_admin, from_db=sg_db, to_db=ls_db ) start = time.time() if continuous: log_info("Waiting for replication status 'Idle' for: {}".format(repl_id)) # Android will report IDLE status, and drop into the 'verify_docs_present' below # due to https://github.com/couchbase/couchbase-lite-java-core/issues/1409 client.wait_for_replication_status_idle(ls_url, repl_id) else: log_info("Waiting for no replications: {}".format(repl_id)) client.wait_for_no_replications(ls_url) # Verify docs replicated to client client.verify_docs_present(url=ls_url, db=ls_db, expected_docs=docs, timeout=240) all_docs_replicated_time = time.time() - start log_info("Replication took: {}s".format(all_docs_replicated_time)) # Verify docs show up in client's changes feed client.verify_docs_in_changes(url=ls_url, db=ls_db, expected_docs=docs) replications = client.get_replications(url=ls_url) if continuous: assert len(replications) == 1, "There should only be one replication running" assert replications[0]["status"] == "Idle", "Replication Status should be 'Idle'" assert replications[0]["continuous"], "Running replication should be continuous" # Only .NET has an 'error' property if "error" in replications[0]: assert len(replications[0]["error"]) == 0 else: assert len(replications) == 0, "No replications should be running"
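# start_replication / wait_for_no_replications wrap the Listener's CouchDB-style
# replication endpoints. A rough sketch of the equivalent raw calls with 'requests'
# (the payload shape follows the public _replicate / _active_tasks API; these
# helpers are illustrative, not the MobileRestClient implementation):
import time
import requests

def start_pull(ls_url, ls_db, sg_url, sg_db, continuous):
    payload = {"source": "{}/{}".format(sg_url, sg_db), "target": ls_db,
               "continuous": continuous}
    resp = requests.post("{}/_replicate".format(ls_url), json=payload)
    resp.raise_for_status()
    return resp.json().get("session_id")

def wait_for_no_replications(ls_url):
    # _active_tasks reports running replications; poll until it is empty
    while requests.get("{}/_active_tasks".format(ls_url)).json():
        time.sleep(1)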
def test_auto_prune_listener_keeps_conflicts_sanity(setup_client_syncgateway_test): """ 1. Create db on LiteServ and add docs 2. Create db on sync_gateway and add docs with the same id 3. Create one shot push / pull replication 4. Update LiteServ 100 times 5. Assert that pruned conflict is still present 6. Delete the current revision and check that a GET returns the old conflict as the current rev """ cluster_config = setup_client_syncgateway_test["cluster_config"] ls_url = setup_client_syncgateway_test["ls_url"] sg_url = setup_client_syncgateway_test["sg_url"] sg_admin_url = setup_client_syncgateway_test["sg_admin_url"] client = MobileRestClient() sg_helper = SyncGateway() sg_helper.start_sync_gateway( cluster_config=cluster_config, url=sg_url, config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS) ) log_info("Running 'test_auto_prune_listener_keeps_conflicts_sanity' ...") log_info("ls_url: {}".format(ls_url)) log_info("sg_url: {}".format(sg_url)) log_info("sg_admin_url: {}".format(sg_admin_url)) num_docs = 1 num_revs = 100 sg_db = "db" ls_db = "ls_db" sg_user_name = "sg_user" sg_user_channels = ["NBC"] client.create_user(url=sg_admin_url, db=sg_db, name=sg_user_name, password="******", channels=sg_user_channels) sg_session = client.create_session(url=sg_admin_url, db=sg_db, name=sg_user_name) ls_db = client.create_database(url=ls_url, name=ls_db) # Create docs with same prefix to create conflicts when the dbs complete 1 shot replication ls_db_docs = client.add_docs(url=ls_url, db=ls_db, number=num_docs, id_prefix="doc", channels=sg_user_channels) assert len(ls_db_docs) == num_docs sg_db_docs = client.add_docs( url=sg_url, db=sg_db, number=num_docs, id_prefix="doc", channels=sg_user_channels, auth=sg_session ) assert len(sg_db_docs) == num_docs # Setup one shot pull replication and wait for it to complete. client.start_replication(url=ls_url, continuous=False, from_url=sg_admin_url, from_db=sg_db, to_db=ls_db) client.wait_for_no_replications(url=ls_url) # There should now be a conflict on the client conflicting_revs = client.get_conflict_revs(url=ls_url, db=ls_db, doc=ls_db_docs[0]) # Get the doc with conflict rev client.get_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"], rev=conflicting_revs[0]) # Update doc past revs limit and make sure conflict is still available updated_doc = client.update_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"], number_updates=num_revs) client.get_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"], rev=conflicting_revs[0]) # Delete doc and ensure that the conflict is now the current rev client.delete_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"], rev=updated_doc["rev"]) current_doc = client.get_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"]) assert current_doc["_rev"] == conflicting_revs[0]
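# get_conflict_revs above presumably wraps the standard '?conflicts=true' query,
# which returns the losing leaf revisions in a '_conflicts' array. A sketch against
# the raw REST API (the helper name and shape are assumptions):
import requests

def get_conflict_revs(url, db, doc_id):
    resp = requests.get("{}/{}/{}".format(url, db, doc_id),
                        params={"conflicts": "true"})
    resp.raise_for_status()
    return resp.json().get("_conflicts", [])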
def test_client_to_sync_gateway_complex_replication_with_revs_limit(setup_client_syncgateway_test): """ Ported from sync_gateway tests repo ... 1. Clear server buckets ... 2. Restart liteserv with _session ... 3. Restart sync_gateway with that config ... 4. Create db on LiteServ ... 5. Add numDocs to LiteServ db ... 6. Setup push replication from LiteServ db to sync_gateway ... 7. Verify doc present on sync_gateway (number of docs) ... 8. Update sg docs numRevs * 4 = 480 ... 9. Update docs on LiteServ db numRevs * 4 = 480 ... 10. Setup pull replication from sg -> liteserv db ... 11. Verify all docs are replicated ... 12. compact LiteServ db (POST _compact) ... 13. Verify number of revs in LiteServ db (?revs_info=true) check rev status == available fail if revs available > revs limit ... 14. Delete LiteServ db conflicts (?conflicts=true) DELETE _conflicts ... 15. Create numDoc number of docs in LiteServ db ... 16. Update LiteServ db docs numRevs * 5 (600) ... 17. Verify LiteServ db revs is < 602 ... 18. Verify LiteServ db docs revs prefix (9 * numRevs + 3) ... 19. Compact LiteServ db ... 20. Verify number of revs <= 10 ... 21. Delete LiteServ docs ... 22. Delete Server bucket ... 23. Delete LiteServ db """ ls_db_name = "ls_db" sg_db = "db" sg_user_name = "sg_user" num_docs = 10 num_revs = 100 cluster_config = setup_client_syncgateway_test["cluster_config"] ls_url = setup_client_syncgateway_test["ls_url"] sg_url = setup_client_syncgateway_test["sg_url"] sg_admin_url = setup_client_syncgateway_test["sg_admin_url"] sg_helper = SyncGateway() sg_helper.start_sync_gateway( cluster_config=cluster_config, url=sg_url, config="{}/walrus-revs-limit.json".format(SYNC_GATEWAY_CONFIGS) ) log_info("Running 'test_client_to_sync_gateway_complex_replication_with_revs_limit'") log_info("ls_url: {}".format(ls_url)) log_info("sg_admin_url: {}".format(sg_admin_url)) log_info("sg_url: {}".format(sg_url)) client = MobileRestClient() # Test the endpoint, listener does not support users but should have a default response client.get_session(url=ls_url) sg_user_channels = ["NBC"] client.create_user(url=sg_admin_url, db=sg_db, name=sg_user_name, password="******", channels=sg_user_channels) sg_session = client.create_session(url=sg_admin_url, db=sg_db, name=sg_user_name) ls_db = client.create_database(url=ls_url, name=ls_db_name) ls_db_docs = client.add_docs(url=ls_url, db=ls_db, number=num_docs, id_prefix=ls_db, channels=sg_user_channels) assert len(ls_db_docs) == num_docs # Start replication ls_db -> sg_db repl_one = client.start_replication( url=ls_url, continuous=True, from_db=ls_db, to_url=sg_admin_url, to_db=sg_db ) client.verify_docs_present(url=sg_admin_url, db=sg_db, expected_docs=ls_db_docs) # Delay the updates here due to couchbase/couchbase-lite-ios#1277. # Basically, if your revs depth is small and someone is updating a doc past the revs depth before a push replication, # the push replication will have no common ancestor with sync_gateway, causing conflicts to be created. # Adding a delay between updates helps this situation. There is an alternative for CBL Mac and CBL .NET to change the default client revs depth, # but that is not configurable for Android. # Adding a delay allows the replication to act as expected on all platforms. 
client.update_docs(url=sg_url, db=sg_db, docs=ls_db_docs, number_updates=num_revs, delay=0.1, auth=sg_session) client.update_docs(url=ls_url, db=ls_db, docs=ls_db_docs, number_updates=num_revs, delay=0.1) # Start replication ls_db <- sg_db repl_two = client.start_replication( url=ls_url, continuous=True, from_url=sg_admin_url, from_db=sg_db, to_db=ls_db ) client.wait_for_replication_status_idle(url=ls_url, replication_id=repl_one) client.wait_for_replication_status_idle(url=ls_url, replication_id=repl_two) client.compact_database(url=ls_url, db=ls_db) # LiteServ should only have 20 revisions due to built in client revs limit client.verify_revs_num_for_docs(url=ls_url, db=ls_db, docs=ls_db_docs, expected_revs_per_doc=20) # Sync Gateway should have 100 revisions due to the specified revs_limit in the sg config and possible conflict winners from the liteserv db client.verify_max_revs_num_for_docs(url=sg_url, db=sg_db, docs=ls_db_docs, expected_max_number_revs_per_doc=100, auth=sg_session) client.delete_conflicts(url=ls_url, db=ls_db, docs=ls_db_docs) expected_generation = num_revs + 1 client.verify_docs_rev_generations(url=ls_url, db=ls_db, docs=ls_db_docs, expected_generation=expected_generation) client.verify_docs_rev_generations(url=sg_url, db=sg_db, docs=ls_db_docs, expected_generation=expected_generation, auth=sg_session) client.delete_docs(url=ls_url, db=ls_db, docs=ls_db_docs) client.verify_docs_deleted(url=ls_url, db=ls_db, docs=ls_db_docs) client.verify_docs_deleted(url=sg_admin_url, db=sg_db, docs=ls_db_docs) ls_db_docs = client.add_docs(url=ls_url, db=ls_db, number=num_docs, id_prefix=ls_db, channels=sg_user_channels) assert len(ls_db_docs) == 10 expected_revs = num_revs + 20 + 2 client.update_docs(url=ls_url, db=ls_db, docs=ls_db_docs, delay=0.1, number_updates=num_revs) client.verify_max_revs_num_for_docs(url=ls_url, db=ls_db, docs=ls_db_docs, expected_max_number_revs_per_doc=expected_revs) expected_generation = (num_revs * 2) + 3 client.verify_docs_rev_generations(url=ls_url, db=ls_db, docs=ls_db_docs, expected_generation=expected_generation) client.compact_database(url=ls_url, db=ls_db) client.verify_revs_num_for_docs(url=ls_url, db=ls_db, docs=ls_db_docs, expected_revs_per_doc=20) client.stop_replication( url=ls_url, continuous=True, from_db=ls_db, to_url=sg_admin_url, to_db=sg_db ) client.stop_replication( url=ls_url, continuous=True, from_url=sg_admin_url, from_db=sg_db, to_db=ls_db ) client.wait_for_no_replications(url=ls_url) client.delete_conflicts(url=ls_url, db=ls_db, docs=ls_db_docs) client.delete_conflicts(url=sg_url, db=sg_db, docs=ls_db_docs, auth=sg_session) client.delete_docs(url=ls_url, db=ls_db, docs=ls_db_docs) # Start push pull and verify that all docs are deleted # Start replication ls_db -> sg_db repl_one = client.start_replication( url=ls_url, continuous=True, from_db=ls_db, to_url=sg_admin_url, to_db=sg_db ) # Start replication ls_db <- sg_db repl_two = client.start_replication( url=ls_url, continuous=True, from_url=sg_admin_url, from_db=sg_db, to_db=ls_db ) client.verify_docs_deleted(url=ls_url, db=ls_db, docs=ls_db_docs) client.verify_docs_deleted(url=sg_admin_url, db=sg_db, docs=ls_db_docs)
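# For reference, the generation arithmetic above works out as follows. A rev's
# generation is its depth in the rev tree, and add_docs reuses the same doc ids,
# so the re-added docs extend the deleted rev trees rather than starting new ones.
num_revs = 100
first_pass_generation = 1 + num_revs  # create + 100 updates = 101 = num_revs + 1
# delete (+1), re-create with the same id (+1), then 100 more updates
second_pass_generation = first_pass_generation + 1 + 1 + num_revs
assert second_pass_generation == (num_revs * 2) + 3  # == 203, matching the test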
def test_auto_prune_with_pull(setup_client_syncgateway_test): """Sanity test for autopruning with replication 1. Create a database on LiteServ (ls_db) 2. Add doc to sync gateway 3. Update doc 50 times on sync_gateway 4. Set up pull replication from sync_gateway db to LiteServ db 5. Verify number of revisions on client is default (20) """ cluster_config = setup_client_syncgateway_test["cluster_config"] sg_mode = setup_client_syncgateway_test["sg_mode"] ls_url = setup_client_syncgateway_test["ls_url"] sg_url = setup_client_syncgateway_test["sg_url"] sg_admin_url = setup_client_syncgateway_test["sg_admin_url"] client = MobileRestClient() sg_config = sync_gateway_config_path_for_mode( "listener_tests/listener_tests", sg_mode) c = cluster.Cluster(config=cluster_config) c.reset(sg_config_path=sg_config) log_info("Running 'test_auto_prune_with_pull' ...") log_info("ls_url: {}".format(ls_url)) log_info("sg_url: {}".format(sg_url)) log_info("sg_admin_url: {}".format(sg_admin_url)) num_docs = 1 num_revs = 50 sg_user_channels = ["NBC"] sg_db = "db" sg_user_name = "sg_user" client.create_user(url=sg_admin_url, db=sg_db, name=sg_user_name, password="******", channels=sg_user_channels) sg_session = client.create_session(url=sg_admin_url, db=sg_db, name=sg_user_name) ls_db = client.create_database(url=ls_url, name="ls_db") sg_db_docs = client.add_docs(url=sg_url, db=sg_db, number=num_docs, id_prefix=sg_db, channels=sg_user_channels, auth=sg_session) assert len(sg_db_docs) == num_docs sg_docs_update = client.update_docs(url=sg_url, db=sg_db, docs=sg_db_docs, number_updates=num_revs, auth=sg_session) # Start continuous replication ls_db <- sg_db repl_one = client.start_replication(url=ls_url, continuous=True, from_url=sg_admin_url, from_db=sg_db, to_db=ls_db) client.wait_for_replication_status_idle(url=ls_url, replication_id=repl_one) client.verify_docs_present(url=ls_url, db=ls_db, expected_docs=sg_docs_update) client.verify_revs_num_for_docs(url=ls_url, db=ls_db, docs=sg_docs_update, expected_revs_per_doc=20)
def test_verify_open_revs_with_revs_limit_push_conflict(setup_client_syncgateway_test): """Test replication from multiple client dbs to one sync_gateway db https://github.com/couchbase/couchbase-lite-ios/issues/1277 """ cluster_config = setup_client_syncgateway_test["cluster_config"] ls_url = setup_client_syncgateway_test["ls_url"] sg_url = setup_client_syncgateway_test["sg_url"] sg_admin_url = setup_client_syncgateway_test["sg_admin_url"] num_docs = 100 num_revs = 20 sg_db = "db" sg_user_name = "sg_user" sg_helper = SyncGateway() sg_helper.start_sync_gateway( cluster_config=cluster_config, url=sg_url, config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS) ) log_info("Running 'test_verify_open_revs_with_revs_limit_push_conflict'") log_info("ls_url: {}".format(ls_url)) log_info("sg_admin_url: {}".format(sg_admin_url)) log_info("sg_url: {}".format(sg_url)) log_info("num_docs: {}".format(num_docs)) log_info("num_revs: {}".format(num_revs)) client = MobileRestClient() # Test the endpoint, listener does not support users but should have a default response client.get_session(url=ls_url) sg_user_channels = ["NBC"] client.create_user(url=sg_admin_url, db=sg_db, name=sg_user_name, password="******", channels=sg_user_channels) sg_session = client.create_session(url=sg_admin_url, db=sg_db, name=sg_user_name) ls_db = client.create_database(url=ls_url, name="ls_db") ls_db_docs = client.add_docs(url=ls_url, db=ls_db, number=num_docs, id_prefix="ls_db", channels=sg_user_channels) assert len(ls_db_docs) == num_docs # Start replication ls_db -> sg_db repl_one = client.start_replication( url=ls_url, continuous=True, from_db=ls_db, to_url=sg_admin_url, to_db=sg_db ) client.verify_docs_present(url=sg_admin_url, db=sg_db, expected_docs=ls_db_docs) client.update_docs(url=sg_url, db=sg_db, docs=ls_db_docs, number_updates=num_revs, auth=sg_session) sg_current_doc = client.get_doc(url=sg_url, db=sg_db, doc_id="ls_db_2", auth=sg_session) client.update_docs(url=ls_url, db=ls_db, docs=ls_db_docs, number_updates=num_revs) ls_current_doc = client.get_doc(url=ls_url, db=ls_db, doc_id="ls_db_2") client.wait_for_replication_status_idle(url=ls_url, replication_id=repl_one) client.verify_doc_rev_generation(url=ls_url, db=ls_db, doc_id=ls_current_doc["_id"], expected_generation=21) client.verify_doc_rev_generation(url=sg_url, db=sg_db, doc_id=sg_current_doc["_id"], expected_generation=21, auth=sg_session) expected_ls_revs = [ls_current_doc["_rev"]] client.verify_open_revs(url=ls_url, db=ls_db, doc_id=ls_current_doc["_id"], expected_open_revs=expected_ls_revs) expected_sg_revs = [ls_current_doc["_rev"], sg_current_doc["_rev"]] client.verify_open_revs(url=sg_admin_url, db=sg_db, doc_id=sg_current_doc["_id"], expected_open_revs=expected_sg_revs)
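# verify_open_revs above inspects the open leaf revisions of a doc's rev tree.
# The underlying query is the CouchDB-style '?open_revs=all', which returns one
# entry per open leaf when JSON is requested. A sketch, with the helper name and
# response handling as assumptions:
import requests

def get_open_revs(url, db, doc_id):
    resp = requests.get("{}/{}/{}".format(url, db, doc_id),
                        params={"open_revs": "all"},
                        headers={"Accept": "application/json"})
    resp.raise_for_status()
    return [row["ok"]["_rev"] for row in resp.json() if "ok" in row]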
def test_auto_prune_listener_keeps_conflicts_sanity( setup_client_syncgateway_test): """ 1. Create db on LiteServ and add docs 2. Create db on sync_gateway and add docs with the same id 3. Create one shot push / pull replication 4. Update LiteServ 100 times 5. Assert that pruned conflict is still present 6. Delete the current revision and check that a GET returns the old conflict as the current rev """ cluster_config = setup_client_syncgateway_test["cluster_config"] sg_mode = setup_client_syncgateway_test["sg_mode"] ls_url = setup_client_syncgateway_test["ls_url"] sg_url = setup_client_syncgateway_test["sg_url"] sg_admin_url = setup_client_syncgateway_test["sg_admin_url"] client = MobileRestClient() sg_config = sync_gateway_config_path_for_mode( "listener_tests/listener_tests", sg_mode) c = cluster.Cluster(config=cluster_config) c.reset(sg_config_path=sg_config) log_info("Running 'test_auto_prune_listener_keeps_conflicts_sanity' ...") log_info("ls_url: {}".format(ls_url)) log_info("sg_url: {}".format(sg_url)) log_info("sg_admin_url: {}".format(sg_admin_url)) num_docs = 1 num_revs = 100 sg_db = "db" ls_db = "ls_db" sg_user_name = "sg_user" sg_user_channels = ["NBC"] client.create_user(url=sg_admin_url, db=sg_db, name=sg_user_name, password="******", channels=sg_user_channels) sg_session = client.create_session(url=sg_admin_url, db=sg_db, name=sg_user_name) ls_db = client.create_database(url=ls_url, name=ls_db) # Create docs with same prefix to create conflicts when the dbs complete 1 shot replication ls_db_docs = client.add_docs(url=ls_url, db=ls_db, number=num_docs, id_prefix="doc", channels=sg_user_channels) assert len(ls_db_docs) == num_docs sg_db_docs = client.add_docs(url=sg_url, db=sg_db, number=num_docs, id_prefix="doc", channels=sg_user_channels, auth=sg_session) assert len(sg_db_docs) == num_docs # Wait for changes to be available on Sync Gateway client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=sg_db_docs, auth=sg_session, polling_interval=1) # Setup one shot pull replication and wait for it to complete. client.start_replication(url=ls_url, continuous=False, from_url=sg_admin_url, from_db=sg_db, to_db=ls_db) client.wait_for_no_replications(url=ls_url) # There should now be a conflict on the client conflicting_revs = client.get_conflict_revs(url=ls_url, db=ls_db, doc=ls_db_docs[0]) # Get the doc with conflict rev client.get_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"], rev=conflicting_revs[0]) # Update doc past revs limit and make sure conflict is still available updated_doc = client.update_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"], number_updates=num_revs) client.get_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"], rev=conflicting_revs[0]) # Delete doc and ensure that the conflict is now the current rev client.delete_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"], rev=updated_doc["rev"]) current_doc = client.get_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"]) assert current_doc["_rev"] == conflicting_revs[0]
def test_backfill_channel_grant_to_role_longpoll(params_from_base_test_setup, sg_conf_name, grant_type, channels_to_grant): """ Test that docs are backfilled for a channel grant (via REST or SYNC) to an existing role 1. Create a 'grantee' user with an empty role 2. 'pusher' user adds docs with channel(s) that will later be granted to 'grantee' 3. Verify that the 'pusher' sees the docs on its changes feed 4. Grant the 'grantee's role access to the pusher's channels (either via REST or via sync function) 5. Verify that 'grantee' gets all of the docs after the grant """ cluster_config = params_from_base_test_setup["cluster_config"] topology = params_from_base_test_setup["cluster_topology"] mode = params_from_base_test_setup["mode"] sg_url = topology["sync_gateways"][0]["public"] sg_admin_url = topology["sync_gateways"][0]["admin"] sg_db = "db" num_docs_per_channel = 100 empty_role_name = "empty_role" log_info("grant_type: {}".format(grant_type)) log_info("channels to grant access to: {}".format(channels_to_grant)) is_multi_channel_grant = False if len(channels_to_grant) == 3: is_multi_channel_grant = True log_info("is_multi_channel_grant: {}".format(is_multi_channel_grant)) sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode) cluster = Cluster(cluster_config) cluster.reset(sg_conf) client = MobileRestClient() client.create_role(url=sg_admin_url, db=sg_db, name=empty_role_name, channels=[]) pusher_info = userinfo.UserInfo("pusher", "pass", channels=channels_to_grant, roles=[]) grantee_info = userinfo.UserInfo("grantee", "pass", channels=[], roles=[empty_role_name]) # Create users client.create_user(url=sg_admin_url, db=sg_db, name=pusher_info.name, password=pusher_info.password, channels=pusher_info.channels, roles=pusher_info.roles) pusher_session = client.create_session(url=sg_admin_url, db=sg_db, name=pusher_info.name, password=pusher_info.password) client.create_user(url=sg_admin_url, db=sg_db, name=grantee_info.name, password=grantee_info.password, channels=grantee_info.channels, roles=grantee_info.roles) grantee_session = client.create_session(url=sg_admin_url, db=sg_db, name=grantee_info.name, password=grantee_info.password) pusher_changes = client.get_changes(url=sg_url, db=sg_db, since=0, auth=pusher_session) # Make sure the _user doc shows up in the changes feed assert len(pusher_changes["results"]) == 1 and pusher_changes["results"][0]["id"] == "_user/pusher" # Add docs with the appropriate channels a_docs = client.add_docs(url=sg_url, db=sg_db, number=num_docs_per_channel, id_prefix=None, auth=pusher_session, channels=["A"]) assert len(a_docs) == 100 expected_docs = a_docs if is_multi_channel_grant: b_docs = client.add_docs(url=sg_url, db=sg_db, number=num_docs_per_channel, id_prefix=None, auth=pusher_session, channels=["B"]) assert len(b_docs) == 100 expected_docs += b_docs c_docs = client.add_docs(url=sg_url, db=sg_db, number=num_docs_per_channel, id_prefix=None, auth=pusher_session, channels=["C"]) assert len(c_docs) == 100 expected_docs += c_docs # Wait for all docs to show up on the changes feed before the access grant client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=expected_docs, auth=pusher_session) # Get changes for grantee before the grant and assert the only change is the user doc grantee_changes_before_grant = client.get_changes(url=sg_url, db=sg_db, since=0, auth=grantee_session) assert len(grantee_changes_before_grant["results"]) == 1 assert grantee_changes_before_grant["results"][0]["id"] == "_user/grantee" if grant_type == "CHANNEL-REST": # Grant channel access to role via REST
client.update_role(url=sg_admin_url, db=sg_db, name=empty_role_name, channels=channels_to_grant) elif grant_type == "CHANNEL-SYNC": # Grant channel access to role via sync function access_doc = document.create_doc(doc_id="channel_grant_to_role") access_doc["roles"] = ["role:{}".format(empty_role_name)] access_doc["channels"] = channels_to_grant client.add_doc(url=sg_url, db=sg_db, doc=access_doc, auth=pusher_session, use_post=True) # Issue changes request after grant grantee_changes_post_grant = client.get_changes( url=sg_url, db=sg_db, since=grantee_changes_before_grant["last_seq"], auth=grantee_session, feed="longpoll") # grantee should have all the docs now if is_multi_channel_grant: # Check that the grantee gets all of the docs for channels ["A", "B", "C"] assert len(grantee_changes_post_grant["results"]) == num_docs_per_channel * 3 else: # Check that the grantee gets all of the docs for channel ["A"] assert len(grantee_changes_post_grant["results"]) == num_docs_per_channel # Remove this conditional when https://github.com/couchbase/sync_gateway/issues/2277 is fixed if mode == "di": # Issue one more changes request from the post grant last seq and make sure there are no other changes grantee_changes_post_post_grant = client.get_changes( url=sg_url, db=sg_db, since=grantee_changes_post_grant["last_seq"], auth=grantee_session, feed="normal") assert len(grantee_changes_post_post_grant["results"]) == 0
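# The CHANNEL-SYNC branch above depends on the sync function in the sg config
# calling access() when the grant doc arrives. The config itself is not shown
# here; a plausible fragment, with the doc id taken from the test and everything
# else an assumption (Sync Gateway embeds the sync function as a JavaScript string):
sync_fn = """
function (doc, oldDoc) {
    if (doc._id == "channel_grant_to_role") {
        // grant the role(s) named in doc.roles access to doc.channels
        access(doc.roles, doc.channels);
    }
    channel(doc.channels);
}
"""
db_config = {"db": {"server": "walrus:", "sync": sync_fn}}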
def test_writing_attachment_to_couchbase_server(params_from_base_test_setup, sg_conf_name): """ 1. Start sync_gateway with sync function that rejects all writes: function(doc, oldDoc) { throw({forbidden:"No writes!"}); } 2. Create a doc with attachment 3. Use CBS sdk to see if attachment doc exists. Doc ID will look like _sync:att:sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0= (where the suffix is the digest) 4. Assert the attachment doc exists """ cluster_config = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode) cluster_helper = ClusterKeywords() cluster_helper.reset_cluster(cluster_config, sg_conf) topology = cluster_helper.get_cluster_topology(cluster_config) cbs_url = topology["couchbase_servers"][0] sg_url = topology["sync_gateways"][0]["public"] sg_url_admin = topology["sync_gateways"][0]["admin"] sg_db = "db" bucket = "data-bucket" log_info("Running 'test_writing_attachment_to_couchbase_server'") log_info("Using cbs_url: {}".format(cbs_url)) log_info("Using sg_url: {}".format(sg_url)) log_info("Using sg_url_admin: {}".format(sg_url_admin)) log_info("Using sg_db: {}".format(sg_db)) log_info("Using bucket: {}".format(bucket)) sg_user_name = "sg_user" sg_user_password = "******" sg_user_channels = ["NBC"] client = MobileRestClient() client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels) sg_user_session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name) docs = client.add_docs(url=sg_url, db=sg_db, number=100, id_prefix=sg_db, channels=sg_user_channels, auth=sg_user_session) assert len(docs) == 100 # Create doc with attachment and push to sync_gateway atts = attachment.load_from_data_dir(["sample_text.txt"]) doc_with_att = document.create_doc(doc_id="att_doc", content={"sample_key": "sample_val"}, attachments=atts, channels=sg_user_channels) client.add_doc(url=sg_url, db=sg_db, doc=doc_with_att, auth=sg_user_session) server = couchbaseserver.CouchbaseServer(cbs_url) # Assert that the attachment doc gets written to couchbase server server_att_docs = server.get_server_docs_with_prefix(bucket=bucket, prefix="_sync:att:") num_att_docs = len(server_att_docs) assert num_att_docs == 1
def test_listener_two_sync_gateways(setup_client_syncgateway_test): """ Port of https://github.com/couchbaselabs/sync-gateway-tests/blob/master/tests/cbl-replication-mismatch-2-gateways.js Scenario: 1. Start 2 sync_gateways 2. Create sg_db_one db on sync_gateway one 3. Create sg_db_two db on sync_gateway two 4. Create ls_db_one and ls_db_two on LiteServ 5. Setup continuous push / pull replication from ls_db_one <-> sg_db_one 6. Setup continuous push / pull replication from ls_db_two <-> sg_db_two 7. Setup continuous push / pull replication from sg_db_one <-> ls_db_two 8. Setup continuous push / pull replication from sg_db_two <-> ls_db_one 9. Add num_docs / 2 to each liteserv database 10. Verify each database has num_docs docs 11. Verify all_docs in all dbs 12. Verify changes feed for sg_db_one and sg_db_two 13. Verify changes feed for ls_db_one and ls_db_two """ num_docs = 500 ls_url = setup_client_syncgateway_test["ls_url"] cluster_config = setup_client_syncgateway_test["cluster_config"] sg_mode = setup_client_syncgateway_test["sg_mode"] cluster_util = ClusterKeywords() topology = cluster_util.get_cluster_topology(cluster_config) sg_one_admin_url = topology["sync_gateways"][0]["admin"] sg_two_admin_url = topology["sync_gateways"][1]["admin"] cb_server_url = topology["couchbase_servers"][0] log_info("Sync Gateway 1 admin url: {}".format(sg_one_admin_url)) log_info("Sync Gateway 2 admin url: {}".format(sg_two_admin_url)) log_info("Couchbase Server url: {}".format(cb_server_url)) c = cluster.Cluster(cluster_config) sg_config_path = sync_gateway_config_path_for_mode( "listener_tests/multiple_sync_gateways", sg_mode) c.reset(sg_config_path=sg_config_path) ls_db_one = "ls_db1" ls_db_two = "ls_db2" sg_db_one = "sg_db1" sg_db_two = "sg_db2" log_info("ls_url: {}".format(ls_url)) log_info("sg_one_admin_url: {}".format(sg_one_admin_url)) log_info("sg_two_admin_url: {}".format(sg_two_admin_url)) log_info("num_docs: {}".format(num_docs)) log_info("Running 'test_listener_two_sync_gateways' ...") client = MobileRestClient() # Delete sg_db2 on sync_gateway 1 client.delete_database(url=sg_one_admin_url, name=sg_db_two) # Delete sg_db1 on sync_gateway 2 client.delete_database(url=sg_two_admin_url, name=sg_db_one) # Create dbs on LiteServ client.create_database(ls_url, ls_db_one) client.create_database(ls_url, ls_db_two) # Start continuous push pull replication ls_db_one <-> sg_db_one client.start_replication(url=ls_url, continuous=True, from_db=ls_db_one, to_url=sg_one_admin_url, to_db=sg_db_one) client.start_replication(url=ls_url, continuous=True, from_url=sg_one_admin_url, from_db=sg_db_one, to_db=ls_db_one) # Start continuous push pull replication ls_db_two <-> sg_db_two client.start_replication(url=ls_url, continuous=True, from_db=ls_db_two, to_url=sg_two_admin_url, to_db=sg_db_two) client.start_replication(url=ls_url, continuous=True, from_url=sg_two_admin_url, from_db=sg_db_two, to_db=ls_db_two) # Start continuous push pull replication sg_db_one <-> ls_db_two client.start_replication(url=ls_url, continuous=True, from_url=sg_one_admin_url, from_db=sg_db_one, to_db=ls_db_two) client.start_replication(url=ls_url, continuous=True, from_db=ls_db_two, to_url=sg_one_admin_url, to_db=sg_db_one) # Start continuous push pull replication sg_db_two <-> ls_db_one client.start_replication(url=ls_url, continuous=True, from_url=sg_two_admin_url, from_db=sg_db_two, to_db=ls_db_one) client.start_replication(url=ls_url, continuous=True, from_db=ls_db_one, to_url=sg_two_admin_url, to_db=sg_db_two) ls_db_one_docs = 
client.add_docs(url=ls_url, db=ls_db_one, number=num_docs // 2, id_prefix="ls_db_one_doc") assert len(ls_db_one_docs) == num_docs // 2 ls_db_two_docs = client.add_docs(url=ls_url, db=ls_db_two, number=num_docs // 2, id_prefix="ls_db_two_doc") assert len(ls_db_two_docs) == num_docs // 2 all_docs = client.merge(ls_db_one_docs, ls_db_two_docs) assert len(all_docs) == 500 # Verify docs replicate to each db client.verify_docs_present(url=ls_url, db=ls_db_one, expected_docs=all_docs) client.verify_docs_present(url=ls_url, db=ls_db_two, expected_docs=all_docs) client.verify_docs_present(url=sg_one_admin_url, db=sg_db_one, expected_docs=all_docs) client.verify_docs_present(url=sg_two_admin_url, db=sg_db_two, expected_docs=all_docs) # Verify changes feeds for each db client.verify_docs_in_changes(url=ls_url, db=ls_db_one, expected_docs=all_docs) client.verify_docs_in_changes(url=ls_url, db=ls_db_two, expected_docs=all_docs) client.verify_docs_in_changes(url=sg_one_admin_url, db=sg_db_one, expected_docs=all_docs) client.verify_docs_in_changes(url=sg_two_admin_url, db=sg_db_two, expected_docs=all_docs)
def test_listener_two_sync_gateways(setup_client_2sgs_test): """ Port of https://github.com/couchbaselabs/sync-gateway-tests/blob/master/tests/cbl-replication-mismatch-2-gateways.js Scenario: 1. Start 2 sync_gateways 2. Create sg_db_one db on sync_gateway one 3. Create sg_db_two db on sync_gateway two 4. Create ls_db_one and ls_db_two on LiteServ 5. Setup continuous push / pull replication from ls_db_one <-> sg_db_one 6. Setup continuous push / pull replication from ls_db_two <-> sg_db_two 7. Setup continuous push / pull replication from sg_db_one <-> ls_db_two 8. Setup continuous push / pull replication from sg_db_two <-> ls_db_one 9. Add num_docs / 2 to each liteserv database 10. Verify each database has num_docs docs 11. Verify all_docs in all dbs 12. Verify changes feed for sg_db_one and sg_db_two 13. Verify changes feed for ls_db_one and ls_db_two """ num_docs = 500 ls_url = setup_client_2sgs_test["ls_url"] cluster_config = setup_client_2sgs_test["cluster_config"] sg_one_admin_url = setup_client_2sgs_test["sg_one_admin_url"] sg_two_admin_url = setup_client_2sgs_test["sg_two_admin_url"] sg_util = SyncGateway() sg_util.start_sync_gateway(cluster_config=cluster_config, url=sg_one_admin_url, config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)) sg_util.start_sync_gateway(cluster_config=cluster_config, url=sg_two_admin_url, config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)) ls_db_one = "ls_db1" ls_db_two = "ls_db2" sg_db_one = "sg_db1" sg_db_two = "sg_db2" log_info("ls_url: {}".format(ls_url)) log_info("sg_one_admin_url: {}".format(sg_one_admin_url)) log_info("sg_two_admin_url: {}".format(sg_two_admin_url)) log_info("num_docs: {}".format(num_docs)) log_info("Running 'test_listener_two_sync_gateways' ...") client = MobileRestClient() # Create dbs on sync_gateway client.create_database(sg_one_admin_url, sg_db_one, "walrus:") client.create_database(sg_two_admin_url, sg_db_two, "walrus:") # Create dbs on LiteServ client.create_database(ls_url, ls_db_one) client.create_database(ls_url, ls_db_two) # Start continuous push pull replication ls_db_one <-> sg_db_one client.start_replication( url=ls_url, continuous=True, from_db=ls_db_one, to_url=sg_one_admin_url, to_db=sg_db_one ) client.start_replication( url=ls_url, continuous=True, from_url=sg_one_admin_url, from_db=sg_db_one, to_db=ls_db_one ) # Start continuous push pull replication ls_db_two <-> sg_db_two client.start_replication( url=ls_url, continuous=True, from_db=ls_db_two, to_url=sg_two_admin_url, to_db=sg_db_two ) client.start_replication( url=ls_url, continuous=True, from_url=sg_two_admin_url, from_db=sg_db_two, to_db=ls_db_two ) # Start continuous push pull replication sg_db_one <-> ls_db_two client.start_replication( url=ls_url, continuous=True, from_url=sg_one_admin_url, from_db=sg_db_one, to_db=ls_db_two ) client.start_replication( url=ls_url, continuous=True, from_db=ls_db_two, to_url=sg_one_admin_url, to_db=sg_db_one ) # Start continuous push pull replication sg_db_two <-> ls_db_one client.start_replication( url=ls_url, continuous=True, from_url=sg_two_admin_url, from_db=sg_db_two, to_db=ls_db_one ) client.start_replication( url=ls_url, continuous=True, from_db=ls_db_one, to_url=sg_two_admin_url, to_db=sg_db_two ) ls_db_one_docs = client.add_docs(url=ls_url, db=ls_db_one, number=num_docs // 2, id_prefix="ls_db_one_doc") assert len(ls_db_one_docs) == num_docs // 2 ls_db_two_docs = client.add_docs(url=ls_url, db=ls_db_two, number=num_docs // 2, id_prefix="ls_db_two_doc") assert len(ls_db_two_docs) == num_docs // 2 all_docs = 
client.merge(ls_db_one_docs, ls_db_two_docs) assert len(all_docs) == 500 # Verify docs replicate to each db client.verify_docs_present(url=ls_url, db=ls_db_one, expected_docs=all_docs) client.verify_docs_present(url=ls_url, db=ls_db_two, expected_docs=all_docs) client.verify_docs_present(url=sg_one_admin_url, db=sg_db_one, expected_docs=all_docs) client.verify_docs_present(url=sg_two_admin_url, db=sg_db_two, expected_docs=all_docs) # Verify changes feeds for each db client.verify_docs_in_changes(url=ls_url, db=ls_db_one, expected_docs=all_docs) client.verify_docs_in_changes(url=ls_url, db=ls_db_two, expected_docs=all_docs) client.verify_docs_in_changes(url=sg_one_admin_url, db=sg_db_one, expected_docs=all_docs) client.verify_docs_in_changes(url=sg_two_admin_url, db=sg_db_two, expected_docs=all_docs)