def test_listener_two_sync_gateways(setup_client_syncgateway_test):
    """
    Port of https://github.com/couchbaselabs/sync-gateway-tests/blob/master/tests/cbl-replication-mismatch-2-gateways.js

    Scenario:
    1. Start 2 sync_gateways
    2. Create sg_db_one db on sync_gateway one
    3. Create sg_db_two db on sync_gateway two
    4. Create ls_db_one and ls_db_two on LiteServ
    5. Setup continuous push / pull replication from ls_db_one <-> sg_db_one
    6. Setup continuous push / pull replication from ls_db_two <-> sg_db_two
    7. Setup continuous push / pull replication from sg_db_one <-> ls_db_two
    8. Setup continuous push / pull replication from sg_db_two <-> ls_db_one
    9. Add num_docs / 2 docs to each LiteServ database
    10. Verify each database has num_docs docs
    11. Verify all_docs in all dbs
    12. Verify changes feed for sg_db_one and sg_db_two
    13. Verify changes feed for ls_db_one and ls_db_two
    """

    num_docs = 500

    ls_url = setup_client_syncgateway_test["ls_url"]
    cluster_config = setup_client_syncgateway_test["cluster_config"]
    sg_mode = setup_client_syncgateway_test["sg_mode"]

    cluster_util = ClusterKeywords()
    topology = cluster_util.get_cluster_topology(cluster_config)
    sg_one_admin_url = topology["sync_gateways"][0]["admin"]
    sg_two_admin_url = topology["sync_gateways"][1]["admin"]
    cb_server_url = topology["couchbase_servers"][0]

    log_info("Sync Gateway 1 admin url: {}".format(sg_one_admin_url))
    log_info("Sync Gateway 2 admin url: {}".format(sg_two_admin_url))
    log_info("Couchbase Server url: {}".format(cb_server_url))

    c = cluster.Cluster(cluster_config)
    sg_config_path = sync_gateway_config_path_for_mode("listener_tests/multiple_sync_gateways", sg_mode)
    c.reset(sg_config_path=sg_config_path)

    ls_db_one = "ls_db1"
    ls_db_two = "ls_db2"
    sg_db_one = "sg_db1"
    sg_db_two = "sg_db2"

    log_info("ls_url: {}".format(ls_url))
    log_info("sg_one_admin_url: {}".format(sg_one_admin_url))
    log_info("sg_two_admin_url: {}".format(sg_two_admin_url))
    log_info("num_docs: {}".format(num_docs))
    log_info("Running 'test_listener_two_sync_gateways' ...")

    client = MobileRestClient()

    # Delete sg_db2 on sync_gateway 1
    client.delete_database(url=sg_one_admin_url, name=sg_db_two)

    # Delete sg_db1 on sync_gateway 2
    client.delete_database(url=sg_two_admin_url, name=sg_db_one)

    # Create dbs on LiteServ
    client.create_database(ls_url, ls_db_one)
    client.create_database(ls_url, ls_db_two)

    # Start continuous push pull replication ls_db_one <-> sg_db_one
    client.start_replication(url=ls_url, continuous=True, from_db=ls_db_one, to_url=sg_one_admin_url, to_db=sg_db_one)
    client.start_replication(url=ls_url, continuous=True, from_url=sg_one_admin_url, from_db=sg_db_one, to_db=ls_db_one)

    # Start continuous push pull replication ls_db_two <-> sg_db_two
    client.start_replication(url=ls_url, continuous=True, from_db=ls_db_two, to_url=sg_two_admin_url, to_db=sg_db_two)
    client.start_replication(url=ls_url, continuous=True, from_url=sg_two_admin_url, from_db=sg_db_two, to_db=ls_db_two)

    # Start continuous push pull replication sg_db_one <-> ls_db_two
    client.start_replication(url=ls_url, continuous=True, from_url=sg_one_admin_url, from_db=sg_db_one, to_db=ls_db_two)
    client.start_replication(url=ls_url, continuous=True, from_db=ls_db_two, to_url=sg_one_admin_url, to_db=sg_db_one)

    # Start continuous push pull replication sg_db_two <-> ls_db_one
    client.start_replication(url=ls_url, continuous=True, from_url=sg_two_admin_url, from_db=sg_db_two, to_db=ls_db_one)
    client.start_replication(url=ls_url, continuous=True, from_db=ls_db_one, to_url=sg_two_admin_url, to_db=sg_db_two)

    ls_db_one_docs = client.add_docs(url=ls_url, db=ls_db_one, number=num_docs // 2, id_prefix="ls_db_one_doc")
    assert len(ls_db_one_docs) == num_docs // 2

    ls_db_two_docs = client.add_docs(url=ls_url, db=ls_db_two, number=num_docs // 2, id_prefix="ls_db_two_doc")
    assert len(ls_db_two_docs) == num_docs // 2

    all_docs = client.merge(ls_db_one_docs, ls_db_two_docs)
    assert len(all_docs) == 500

    # Verify docs replicate to each db
    client.verify_docs_present(url=ls_url, db=ls_db_one, expected_docs=all_docs)
    client.verify_docs_present(url=ls_url, db=ls_db_two, expected_docs=all_docs)
    client.verify_docs_present(url=sg_one_admin_url, db=sg_db_one, expected_docs=all_docs)
    client.verify_docs_present(url=sg_two_admin_url, db=sg_db_two, expected_docs=all_docs)

    # Verify changes feeds for each db
    client.verify_docs_in_changes(url=ls_url, db=ls_db_one, expected_docs=all_docs)
    client.verify_docs_in_changes(url=ls_url, db=ls_db_two, expected_docs=all_docs)
    client.verify_docs_in_changes(url=sg_one_admin_url, db=sg_db_one, expected_docs=all_docs)
    client.verify_docs_in_changes(url=sg_two_admin_url, db=sg_db_two, expected_docs=all_docs)
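

# Each "<->" step in the scenario above is expressed as a push + pull pair of
# client.start_replication() calls. The helper below is a minimal sketch of that
# pattern, assuming only the MobileRestClient API used in these tests; it is a
# hypothetical convenience (not part of MobileRestClient) shown to make the
# repeated pairing explicit.
def start_push_pull(client, ls_url, local_db, remote_url, remote_db, continuous=True):
    """Start a continuous push and pull replication between a local LiteServ db and a remote db."""
    # Push: local_db on the LiteServ at ls_url -> remote_db at remote_url
    push = client.start_replication(url=ls_url, continuous=continuous,
                                    from_db=local_db, to_url=remote_url, to_db=remote_db)
    # Pull: remote_db at remote_url -> local_db on the LiteServ at ls_url
    pull = client.start_replication(url=ls_url, continuous=continuous,
                                    from_url=remote_url, from_db=remote_db, to_db=local_db)
    return push, pull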


def test_replication_with_session_cookie(setup_client_syncgateway_test):
    """Regression test for https://github.com/couchbase/couchbase-lite-android/issues/817

    1. Sync Gateway config with guest disabled = true and one user added (e.g. user1 / 1234)
    2. Create a new session on SGW for user1 by using POST /_session.
       Capture the SyncGatewaySession cookie from the set-cookie in the response header.
    3. Start continuous push and pull replicators on the LiteServ with the SyncGatewaySession cookie.
       Make sure that both replicators start correctly.
    4. Delete the session from SGW by sending DELETE /_sessions/ to SGW
    5. Cancel both push and pull replicators on the LiteServ
    6. Repeat steps 1 and 2
    """

    ls_db = "ls_db"
    sg_db = "db"

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_url = setup_client_syncgateway_test["sg_url"]
    sg_admin_url = setup_client_syncgateway_test["sg_admin_url"]

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_url,
        config="{}/walrus-user.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_replication_with_session_cookie'")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_admin_url: {}".format(sg_admin_url))
    log_info("sg_url: {}".format(sg_url))

    client = MobileRestClient()
    client.create_database(url=ls_url, name=ls_db)

    # Get session header for user_1
    session_header = client.create_session_header(url=sg_url, db=sg_db, name="user_1", password="******")

    # Get session id from header
    session_parts = re.split("=|;", session_header)
    session_id = session_parts[1]
    log_info("{}: {}".format(session_parts[0], session_id))
    session = (session_parts[0], session_id)

    # Start authenticated push replication
    repl_one = client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_url,
        to_db=sg_db,
        to_auth=session_header
    )

    # Start authenticated pull replication
    repl_two = client.start_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_url,
        from_db=sg_db,
        from_auth=session_header,
        to_db=ls_db,
    )

    # Wait for the 2 replications to be 'Idle'. On .NET they may not be immediately available via _active_tasks
    client.wait_for_replication_status_idle(ls_url, repl_one)
    client.wait_for_replication_status_idle(ls_url, repl_two)

    replications = client.get_replications(ls_url)
    assert len(replications) == 2, "2 replications (push / pull) should be running"

    num_docs_pushed = 100

    # Sanity test docs
    ls_docs = client.add_docs(url=ls_url, db=ls_db, number=num_docs_pushed, id_prefix="ls_doc", channels=["ABC"])
    assert len(ls_docs) == num_docs_pushed

    sg_docs = client.add_docs(url=sg_url, db=sg_db, number=num_docs_pushed, id_prefix="sg_doc", auth=session, channels=["ABC"])
    assert len(sg_docs) == num_docs_pushed

    all_docs = client.merge(ls_docs, sg_docs)
    log_info(all_docs)

    client.verify_docs_present(url=sg_admin_url, db=sg_db, expected_docs=all_docs)
    client.verify_docs_present(url=ls_url, db=ls_db, expected_docs=all_docs)

    # GET from session endpoint /{db}/_session/{session-id}
    session = client.get_session(url=sg_admin_url, db=sg_db, session_id=session_id)
    assert len(session["userCtx"]["channels"]) == 2, "There should be only 2 channels for the user"
    assert "ABC" in session["userCtx"]["channels"], "The channel info should contain 'ABC'"
    assert session["userCtx"]["name"] == "user_1", "The user should have the name 'user_1'"
    assert len(session["authentication_handlers"]) == 2, "There should be 2 authentication_handlers"
    assert "default" in session["authentication_handlers"], "Did not find 'default' in authentication_handlers"
    assert "cookie" in session["authentication_handlers"], "Did not find 'cookie' in authentication_handlers"

    log_info("SESSIONs: {}".format(session))

    # Delete session via sg admin port and _user rest endpoint
    client.delete_session(url=sg_admin_url, db=sg_db, user_name="user_1", session_id=session_id)

    # Make sure session is deleted
    try:
        session = client.get_session(url=sg_admin_url, db=sg_db, session_id=session_id)
    except HTTPError as he:
        expected_error_code = he.response.status_code
        log_info(expected_error_code)
        assert expected_error_code == 404, "Expected 404 status, actual {}".format(expected_error_code)

    # Cancel the replications
    # Stop repl_one
    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_url,
        to_db=sg_db,
        to_auth=session_header
    )

    # Stop repl_two
    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_url,
        from_db=sg_db,
        from_auth=session_header,
        to_db=ls_db,
    )

    client.wait_for_no_replications(ls_url)
    replications = client.get_replications(ls_url)
    assert len(replications) == 0, "All replications should be stopped"

    # Create new session and new push / pull replications
    session_header = client.create_session_header(url=sg_url, db=sg_db, name="user_1", password="******")

    # Get session id from header
    session_parts = re.split("=|;", session_header)
    session_id = session_parts[1]
    log_info("{}: {}".format(session_parts[0], session_id))

    # Start authenticated push replication
    repl_one = client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_url,
        to_db=sg_db,
        to_auth=session_header
    )

    # Start authenticated pull replication
    repl_two = client.start_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_url,
        from_db=sg_db,
        from_auth=session_header,
        to_db=ls_db,
    )

    replications = client.get_replications(ls_url)
    assert len(replications) == 2, "2 replications (push / pull) should be running, found: {}".format(len(replications))

    session = client.get_session(url=sg_admin_url, db=sg_db, session_id=session_id)
    assert len(session["userCtx"]["channels"]) == 2, "There should be only 2 channels for the user"
    assert "ABC" in session["userCtx"]["channels"], "The channel info should contain 'ABC'"
    assert session["userCtx"]["name"] == "user_1", "The user should have the name 'user_1'"
    assert len(session["authentication_handlers"]) == 2, "There should be 2 authentication_handlers"
    assert "default" in session["authentication_handlers"], "Did not find 'default' in authentication_handlers"
    assert "cookie" in session["authentication_handlers"], "Did not find 'cookie' in authentication_handlers"

    log_info("SESSIONs: {}".format(session))

    # Delete session via sg admin port and db rest endpoint
    client.delete_session(url=sg_admin_url, db=sg_db, session_id=session_id)

    # Make sure session is deleted
    try:
        session = client.get_session(url=sg_admin_url, db=sg_db, session_id=session_id)
    except HTTPError as he:
        expected_error_code = he.response.status_code
        log_info(expected_error_code)
        assert expected_error_code == 404, "Expected 404 status, actual {}".format(expected_error_code)


def test_peer_2_peer_sanity(setup_p2p_test):
    """
    1. Sanity P2P Scenario
    2. Launch LiteServ 1 and LiteServ 2
    3. Create a database on each LiteServ
    4. Start continuous push pull replication from each db to the other
    5. Add docs to each db
    6. Verify the docs show up at each db
    7. Verify the docs show up in the database's changes feed.
    """

    ls_url_one = setup_p2p_test["ls_url_one"]
    ls_url_two = setup_p2p_test["ls_url_two"]

    num_docs_per_db = 1000

    log_info("ls_url_one: {}".format(ls_url_one))
    log_info("ls_url_two: {}".format(ls_url_two))

    client = MobileRestClient()

    log_info("Creating databases")
    ls_db1 = client.create_database(url=ls_url_one, name="ls_db1")
    ls_db2 = client.create_database(url=ls_url_two, name="ls_db2")

    # Setup continuous push / pull replication from LiteServ 1 ls_db1 to LiteServ 2 ls_db2
    repl_one = client.start_replication(
        url=ls_url_one,
        continuous=True,
        from_db=ls_db1,
        to_url=ls_url_two, to_db=ls_db2
    )
    repl_two = client.start_replication(
        url=ls_url_one,
        continuous=True,
        from_url=ls_url_two, from_db=ls_db2,
        to_db=ls_db1
    )

    # Setup continuous push / pull replication from LiteServ 2 ls_db2 to LiteServ 1 ls_db1
    repl_three = client.start_replication(
        url=ls_url_two,
        continuous=True,
        from_db=ls_db2,
        to_url=ls_url_one, to_db=ls_db1
    )
    repl_four = client.start_replication(
        url=ls_url_two,
        continuous=True,
        from_url=ls_url_one, from_db=ls_db1,
        to_db=ls_db2
    )

    client.wait_for_replication_status_idle(url=ls_url_one, replication_id=repl_one)
    client.wait_for_replication_status_idle(url=ls_url_one, replication_id=repl_two)
    client.wait_for_replication_status_idle(url=ls_url_two, replication_id=repl_three)
    client.wait_for_replication_status_idle(url=ls_url_two, replication_id=repl_four)

    ls_url_one_replications = client.get_replications(ls_url_one)
    assert len(ls_url_one_replications) == 2

    ls_url_two_replications = client.get_replications(ls_url_two)
    assert len(ls_url_two_replications) == 2

    ls_db1_docs = client.add_docs(url=ls_url_one, db=ls_db1, number=num_docs_per_db, id_prefix="test_ls_db1")
    assert len(ls_db1_docs) == num_docs_per_db

    ls_db2_docs = client.add_docs(url=ls_url_two, db=ls_db2, number=num_docs_per_db, id_prefix="test_ls_db2")
    assert len(ls_db2_docs) == num_docs_per_db

    all_docs = client.merge(ls_db1_docs, ls_db2_docs)
    assert len(all_docs) == 2000

    client.verify_docs_present(url=ls_url_one, db=ls_db1, expected_docs=all_docs)
    client.verify_docs_present(url=ls_url_two, db=ls_db2, expected_docs=all_docs)

    client.verify_docs_in_changes(url=ls_url_one, db=ls_db1, expected_docs=all_docs)
    client.verify_docs_in_changes(url=ls_url_two, db=ls_db2, expected_docs=all_docs)
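

# MobileRestClient.start_replication() drives the listener's CouchDB-style
# POST /_replicate endpoint. The sketch below shows roughly what the P2P pull
# in the test above (ls_db2 on LiteServ 2 into ls_db1 on LiteServ 1) would look
# like as a raw HTTP request. The payload shape is an assumption for
# illustration only; the body MobileRestClient actually builds may differ, and
# the requests dependency is assumed to be available.
def _example_raw_replicate(ls_url_one, ls_url_two):
    import requests
    payload = {
        "source": "{}/ls_db2".format(ls_url_two),  # remote source db on LiteServ 2
        "target": "ls_db1",                        # local target db on LiteServ 1
        "continuous": True,
    }
    resp = requests.post("{}/_replicate".format(ls_url_one), json=payload)
    resp.raise_for_status()
    return resp.json()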


def test_listener_two_sync_gateways(setup_client_2sgs_test):
    """
    Port of https://github.com/couchbaselabs/sync-gateway-tests/blob/master/tests/cbl-replication-mismatch-2-gateways.js

    Scenario:
    1. Start 2 sync_gateways
    2. Create sg_db_one db on sync_gateway one
    3. Create sg_db_two db on sync_gateway two
    4. Create ls_db_one and ls_db_two on LiteServ
    5. Setup continuous push / pull replication from ls_db_one <-> sg_db_one
    6. Setup continuous push / pull replication from ls_db_two <-> sg_db_two
    7. Setup continuous push / pull replication from sg_db_one <-> ls_db_two
    8. Setup continuous push / pull replication from sg_db_two <-> ls_db_one
    9. Add num_docs / 2 docs to each LiteServ database
    10. Verify each database has num_docs docs
    11. Verify all_docs in all dbs
    12. Verify changes feed for sg_db_one and sg_db_two
    13. Verify changes feed for ls_db_one and ls_db_two
    """

    num_docs = 500

    ls_url = setup_client_2sgs_test["ls_url"]
    cluster_config = setup_client_2sgs_test["cluster_config"]
    sg_one_admin_url = setup_client_2sgs_test["sg_one_admin_url"]
    sg_two_admin_url = setup_client_2sgs_test["sg_two_admin_url"]

    sg_util = SyncGateway()
    sg_util.start_sync_gateway(cluster_config=cluster_config, url=sg_one_admin_url, config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS))
    sg_util.start_sync_gateway(cluster_config=cluster_config, url=sg_two_admin_url, config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS))

    ls_db_one = "ls_db1"
    ls_db_two = "ls_db2"
    sg_db_one = "sg_db1"
    sg_db_two = "sg_db2"

    log_info("ls_url: {}".format(ls_url))
    log_info("sg_one_admin_url: {}".format(sg_one_admin_url))
    log_info("sg_two_admin_url: {}".format(sg_two_admin_url))
    log_info("num_docs: {}".format(num_docs))
    log_info("Running 'test_listener_two_sync_gateways' ...")

    client = MobileRestClient()

    # Create dbs on sync_gateway
    client.create_database(sg_one_admin_url, sg_db_one, "walrus:")
    client.create_database(sg_two_admin_url, sg_db_two, "walrus:")

    # Create dbs on LiteServ
    client.create_database(ls_url, ls_db_one)
    client.create_database(ls_url, ls_db_two)

    # Start continuous push pull replication ls_db_one <-> sg_db_one
    client.start_replication(
        url=ls_url, continuous=True,
        from_db=ls_db_one,
        to_url=sg_one_admin_url, to_db=sg_db_one
    )
    client.start_replication(
        url=ls_url, continuous=True,
        from_url=sg_one_admin_url, from_db=sg_db_one,
        to_db=ls_db_one
    )

    # Start continuous push pull replication ls_db_two <-> sg_db_two
    client.start_replication(
        url=ls_url, continuous=True,
        from_db=ls_db_two,
        to_url=sg_two_admin_url, to_db=sg_db_two
    )
    client.start_replication(
        url=ls_url, continuous=True,
        from_url=sg_two_admin_url, from_db=sg_db_two,
        to_db=ls_db_two
    )

    # Start continuous push pull replication sg_db_one <-> ls_db_two
    client.start_replication(
        url=ls_url, continuous=True,
        from_url=sg_one_admin_url, from_db=sg_db_one,
        to_db=ls_db_two
    )
    client.start_replication(
        url=ls_url, continuous=True,
        from_db=ls_db_two,
        to_url=sg_one_admin_url, to_db=sg_db_one
    )

    # Start continuous push pull replication sg_db_two <-> ls_db_one
    client.start_replication(
        url=ls_url, continuous=True,
        from_url=sg_two_admin_url, from_db=sg_db_two,
        to_db=ls_db_one
    )
    client.start_replication(
        url=ls_url, continuous=True,
        from_db=ls_db_one,
        to_url=sg_two_admin_url, to_db=sg_db_two
    )

    ls_db_one_docs = client.add_docs(url=ls_url, db=ls_db_one, number=num_docs // 2, id_prefix="ls_db_one_doc")
    assert len(ls_db_one_docs) == num_docs // 2

    ls_db_two_docs = client.add_docs(url=ls_url, db=ls_db_two, number=num_docs // 2, id_prefix="ls_db_two_doc")
    assert len(ls_db_two_docs) == num_docs // 2

    all_docs = client.merge(ls_db_one_docs, ls_db_two_docs)
    assert len(all_docs) == 500

    # Verify docs replicate to each db
    client.verify_docs_present(url=ls_url, db=ls_db_one, expected_docs=all_docs)
    client.verify_docs_present(url=ls_url, db=ls_db_two, expected_docs=all_docs)
    client.verify_docs_present(url=sg_one_admin_url, db=sg_db_one, expected_docs=all_docs)
    client.verify_docs_present(url=sg_two_admin_url, db=sg_db_two, expected_docs=all_docs)

    # Verify changes feeds for each db
    client.verify_docs_in_changes(url=ls_url, db=ls_db_one, expected_docs=all_docs)
    client.verify_docs_in_changes(url=ls_url, db=ls_db_two, expected_docs=all_docs)
    client.verify_docs_in_changes(url=sg_one_admin_url, db=sg_db_one, expected_docs=all_docs)
    client.verify_docs_in_changes(url=sg_two_admin_url, db=sg_db_two, expected_docs=all_docs)