def test_bucket_shadow_low_revs_limit_repeated_deletes(params_from_base_test_setup):
    """
    Validate that Sync Gateway doesn't panic (and instead creates a conflict branch
    and prints a warning) after doing the following steps:

    - Set revs_limit to 5
    - Create a doc via SG
    - Issue a delete operation for that doc via SG
    - Repeat the delete step (each additional delete will create a new revision
      in SG, but the delete on the source bucket will fail with the 'not found'
      error, which also means that upstream_rev won't get incremented)
    - Recreate the doc in the source bucket

    See https://github.com/couchbaselabs/sync-gateway-testcluster/issues/291#issuecomment-191521993
    """
    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    default_config_path_shadower_low_revs = sync_gateway_config_path_for_mode("sync_gateway_bucketshadow_low_revs", mode)
    default_config_path_non_shadower_low_revs = sync_gateway_config_path_for_mode("sync_gateway_default_low_revs", mode)

    log_info("Running 'test_bucket_shadow_low_revs_limit_repeated_deletes'")
    log_info("Using cluster_config: {}".format(cluster_config))

    cluster = Cluster(config=cluster_config)
    sc = init_shadow_cluster(cluster, default_config_path_shadower_low_revs, default_config_path_non_shadower_low_revs)

    # Write doc into shadower SG
    doc_id = sc.alice_shadower.add_doc()

    # Wait until it gets to source bucket
    get_doc_from_source_bucket_retry(doc_id, sc.source_bucket)

    # Wait until upstream-rev in _sync metadata is non empty
    # Otherwise, this will not reproduce a panic.
    # Bounded poll instead of the previous unbounded 'while True' so a broken
    # shadow feed fails the test instead of hanging the suite forever.
    max_wait_seconds = 60
    poll_start = time.time()
    while True:
        doc = sc.data_bucket.get(doc_id)
        if doc.success and "upstream_rev" in doc.value["_sync"]:
            break
        if time.time() - poll_start > max_wait_seconds:
            raise Exception("Timed out waiting for upstream_rev in _sync metadata for doc: {}".format(doc_id))
        time.sleep(1)

    # Repeatedly issue a delete operation for that doc via SG
    # Keep adding tombstone revs to the one and only branch.
    # NOTE(review): the docstring mentions repeating 5x, the loop does 100 —
    # presumably to make the repro reliable well past revs_limit=5; confirm intent.
    rev_id_to_delete = None
    for i in xrange(100):
        resp = sc.alice_shadower.delete_doc(doc_id, rev_id_to_delete)
        rev_id_to_delete = resp["rev"]

    # Recreate doc with that ID in the source bucket
    sc.source_bucket.upsert(doc_id, json.loads('{"foo":"bar"}'))

    # Check if SG's are up
    errors = cluster.verify_alive(sc.mode)
    assert len(errors) == 0

    # Restart Shadow SG
    sc.shadower_sg.stop()
    sc.shadower_sg.start(default_config_path_shadower_low_revs)
def test_sync_gateway_config_for_mode_exists():
    """The resolved config path must follow the '<name>_<mode>.json' convention for both modes."""
    cases = [
        ("cc", "resources/sync_gateway_configs/sync_gateway_default_functional_tests_cc.json"),
        ("di", "resources/sync_gateway_configs/sync_gateway_default_functional_tests_di.json"),
    ]
    for sg_mode, expected_path in cases:
        resolved = sync_gateway_config_path_for_mode("sync_gateway_default_functional_tests", sg_mode)
        assert resolved == expected_path
def test_openidconnect_invalid_scope(params_from_base_test_setup, sg_conf_name):
    """Try to discover the authenticate endpoint URL with a test provider that has an
    invalid scope, and expect an error"""

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)
    sg_url = topology["sync_gateways"][0]["public"]
    sg_db = "db"

    log_info("Running 'test_openidconnect_invalid_scope'")
    log_info("Using cluster_config: {}".format(cluster_config))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_db: {}".format(sg_db))

    # Reuse the helper created above instead of instantiating a second ClusterKeywords
    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    # pytest.raises replaces the manual try/except/return + trailing raise dance:
    # the test fails automatically if no HTTPError is raised.
    with pytest.raises(HTTPError):
        discover_authenticate_endpoint(sg_url, sg_db, "testinvalidscope")
    log_info("got expected HTTPError trying to get the authenticate endpoint")
def test_openidconnect_no_session(params_from_base_test_setup, sg_conf_name):
    """Authenticate with a test openid provider that is configured to NOT add a Set-Cookie header"""

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)
    sg_url = topology["sync_gateways"][0]["public"]
    sg_db = "db"

    log_info("Running 'test_openidconnect_no_session'")
    log_info("Using cluster_config: {}".format(cluster_config))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_db: {}".format(sg_db))

    # Reuse the helper created above instead of instantiating a second ClusterKeywords
    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    # multipart/form data content
    formdata = {
        'username': ('', 'testuser'),
        'authenticated': ('', 'Return a valid authorization code for this user')
    }

    authenticate_url = discover_authenticate_url(sg_url, sg_db, "testnosessions")

    # Make the request to _oidc_testing
    response = requests.post(authenticate_url, files=formdata)
    log_r(response)

    # Provider is configured without sessions, so SG must not set a cookie
    assert "Set-Cookie" not in response.headers
def test_online_to_offline_check_503(params_from_base_test_setup, sg_conf_name, num_docs):
    """Take the db offline and verify every REST endpoint answers 503."""
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    target_sg = cluster.sync_gateways[0]
    admin = Admin(target_sg)

    # While online, every db endpoint should function as expected
    online_errors = rest_scan(target_sg, db="db", online=True, num_docs=num_docs, user_name="seth", channels=["ABC"])
    assert not online_errors

    # Take bucket offline
    status = admin.take_db_offline(db="db")
    assert status == 200

    # Once offline, every db endpoint should come back as 503
    offline_errors = rest_scan(target_sg, db="db", online=False, num_docs=num_docs, user_name="seth", channels=["ABC"])

    # We hit NUM_ENDPOINT unique REST endpoints + num of doc PUT failures
    assert len(offline_errors) == NUM_ENDPOINTS + (num_docs * 2)
    for err in offline_errors:
        log_info("({},{})".format(err[0], err[1]))
        assert err[1] == 503
def test_single_user_single_channel(params_from_base_test_setup, sg_conf_name):
    """Docs are only visible to users whose channels cover them; the admin user sees both channels."""
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'single_user_single_channel'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    sgs = cluster.sync_gateways

    num_seth_docs = 7000
    num_cbs_docs = 3000

    admin = Admin(sgs[0])

    # One user per channel, plus an admin user subscribed to both channels
    seth = admin.register_user(target=sgs[0], db="db", name="seth", password="******", channels=["ABC"])
    cbs_user = admin.register_user(target=sgs[0], db="db", name="cbs_user", password="******", channels=["CBS"])
    admin_user = admin.register_user(target=sgs[0], db="db", name="admin", password="******", channels=["ABC", "CBS"])

    seth.add_docs(num_seth_docs)
    cbs_user.add_docs(num_cbs_docs)

    assert len(seth.cache) == num_seth_docs
    assert len(cbs_user.cache) == num_cbs_docs
    assert len(admin_user.cache) == 0

    time.sleep(10)

    verify_changes([seth], expected_num_docs=num_seth_docs, expected_num_revisions=0, expected_docs=seth.cache)
    verify_changes([cbs_user], expected_num_docs=num_cbs_docs, expected_num_revisions=0, expected_docs=cbs_user.cache)

    # The admin user should see the union of both users' docs
    combined_docs = {}
    for cache in (seth.cache, cbs_user.cache):
        combined_docs.update(cache)
    verify_changes([admin_user], expected_num_docs=num_cbs_docs + num_seth_docs, expected_num_revisions=0, expected_docs=combined_docs)
def test_db_offline_tap_loss_sanity(params_from_base_test_setup, sg_conf_name, num_docs):
    """Deleting the server bucket severs the TAP feed; the db must go offline and serve 503s."""
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    target_sg = cluster.sync_gateways[0]

    # all db rest enpoints should succeed while the feed is alive
    online_errors = rest_scan(target_sg, db="db", online=True, num_docs=num_docs, user_name="seth", channels=["ABC"])
    assert not online_errors

    # Delete bucket to sever TAP feed
    status = cluster.servers[0].delete_bucket("data-bucket")
    assert status == 0

    # Check that bucket is in offline state
    offline_errors = rest_scan(target_sg, db="db", online=False, num_docs=num_docs, user_name="seth", channels=["ABC"])
    assert len(offline_errors) == NUM_ENDPOINTS + (num_docs * 2)

    for err in offline_errors:
        log_info("({},{})".format(err[0], err[1]))
        assert err[1] == 503
def test_openidconnect_oidc_challenge_invalid_provider_name(params_from_base_test_setup, sg_conf_name):
    """
    If oidc_challenge is called with an invalid provider name, it should
    not return an Www-Authenticate header
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)
    sg_url = topology["sync_gateways"][0]["public"]
    sg_db = "db"

    log_info("Running 'test_openidconnect_oidc_challenge_invalid_provider_name'")
    log_info("Using cluster_config: {}".format(cluster_config))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_db: {}".format(sg_db))

    # Reuse the helper created above instead of instantiating a second ClusterKeywords
    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    # make a request to the _oidc_challenge endpoint
    oidc_challenge_url = "{}/{}/_oidc_challenge?provider={}".format(sg_url, sg_db, "bogusprovider")
    response = requests.get(oidc_challenge_url)
    log_info("response.headers: {}".format(response.headers))

    # A bogus provider must yield a 400 with no authentication challenge header
    assert "Www-Authenticate" not in response.headers
    assert response.status_code == 400
def params_from_base_suite_setup(request):
    """Suite-scope fixture: optionally provision a multiple-sync-gateway cluster,
    then yield {cluster_config, mode} to dependent fixtures and tests."""
    log_info("Setting up 'params_from_base_suite_setup' ...")

    server_version = request.config.getoption("--server-version")
    sync_gateway_version = request.config.getoption("--sync-gateway-version")
    mode = request.config.getoption("--mode")
    skip_provisioning = request.config.getoption("--skip-provisioning")

    log_info("server_version: {}".format(server_version))
    log_info("sync_gateway_version: {}".format(sync_gateway_version))
    log_info("mode: {}".format(mode))
    log_info("skip_provisioning: {}".format(skip_provisioning))

    # Make sure mode for sync_gateway is supported ('cc' or 'di')
    validate_sync_gateway_mode(mode)

    # Pick the multiple-sync-gateway cluster config matching the mode ('cc' or 'di')
    cluster_config = "{}/multiple_sync_gateways_{}".format(constants.CLUSTER_CONFIGS_DIR, mode)
    sg_config = sync_gateway_config_path_for_mode("sync_gateway_default_functional_tests", mode)

    # Provision the cluster unless the user passed '--skip-provisioning'
    if not skip_provisioning:
        ClusterKeywords().provision_cluster(
            cluster_config=cluster_config,
            server_version=server_version,
            sync_gateway_version=sync_gateway_version,
            sync_gateway_config=sg_config
        )

    # Everything after the yield runs as suite teardown
    yield {"cluster_config": cluster_config, "mode": mode}

    log_info("Tearing down 'params_from_base_suite_setup' ...")
def test_log_rotation_negative(params_from_base_test_setup, sg_conf_name):
    """Test log rotation with negative values for:
        "maxsize": -1,
        "maxage": -30,
        "maxbackups": -2
    SG shouldn't start
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup["xattrs_enabled"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf)
    sg_one_url = cluster_hosts["sync_gateways"][0]["public"]

    # read sample sg_conf
    data = load_sync_gateway_config(sg_conf, mode, cluster_hosts["couchbase_servers"][0], xattrs_enabled, cluster_conf)

    # set negative values for rotation section
    data['logging']["default"]["rotation"] = {
        "maxsize": -1,
        "maxage": -30,
        "maxbackups": -2,
        "localtime": True
    }

    # Write the modified config to a temp file.
    # NOTE(review): '[:-2]' drops the filename AND its parent directory, so the
    # temp config lands one level above sg_conf, not in "the same folder" as the
    # original comment claimed — preserved as-is; confirm which was intended.
    temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json'
    with open(temp_conf, 'w') as fp:
        json.dump(data, fp)

    # Stop sync_gateways
    log_info(">>> Stopping sync_gateway")
    sg_helper = SyncGateway()
    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)

    # try/finally guarantees the temp config is removed on every exit path
    # (previously the os.remove call was duplicated in both branches).
    try:
        try:
            sg_helper.start_sync_gateways(cluster_config=cluster_conf, url=sg_one_url, config=temp_conf)
        except ProvisioningError:
            # Expected: SG refused to start with negative rotation values.
            # Bring it back up with the known-good config and pass the test.
            sg_helper.start_sync_gateways(cluster_config=cluster_conf, url=sg_one_url, config=sg_conf)
            return
    finally:
        os.remove(temp_conf)

    pytest.fail("SG shouldn't be started!!!!")
def test_offline_false_config_rest(params_from_base_test_setup, sg_conf_name, num_docs):
    """With offline disabled in the config, the db must report an Online state on every SG."""
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # all db endpoints should function as expected
    scan_errors = rest_scan(cluster.sync_gateways[0], db="db", online=True, num_docs=num_docs, user_name="seth", channels=["ABC"])
    assert not scan_errors

    # Scenario 4
    # Check the db has an Online state at each running sync_gateway
    for sg in cluster.sync_gateways:
        state = Admin(sg).get_db_info("db")["state"]
        assert state == "Online"
def test_offline_true_config_bring_online(params_from_base_test_setup, sg_conf_name, num_docs):
    """A db configured offline serves 503s until POST /db/_online brings it up."""
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    target_sg = cluster.sync_gateways[0]
    admin = Admin(target_sg)

    # all db endpoints should fail with 503
    offline_errors = rest_scan(target_sg, db="db", online=False, num_docs=num_docs, user_name="seth", channels=["ABC"])
    assert len(offline_errors) == NUM_ENDPOINTS + (num_docs * 2)
    for err in offline_errors:
        log_info("({},{})".format(err[0], err[1]))
        assert err[1] == 503

    # Scenario 9
    # POST /db/_online
    status = admin.bring_db_online(db="db")
    assert status == 200

    # all db endpoints should succeed
    online_errors = rest_scan(target_sg, db="db", online=True, num_docs=num_docs, user_name="seth", channels=["ABC"])
    assert not online_errors
def test_muliple_users_single_channel(params_from_base_test_setup, sg_conf_name):
    """Three users sharing one channel should each see the union of all users' docs."""
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'muliple_users_single_channel'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    sgs = cluster.sync_gateways

    num_docs_seth = 1000
    num_docs_adam = 2000
    num_docs_traun = 3000

    admin = Admin(sgs[0])

    # All three users subscribe to the same "ABC" channel
    seth = admin.register_user(target=sgs[0], db="db", name="seth", password="******", channels=["ABC"])
    adam = admin.register_user(target=sgs[0], db="db", name="adam", password="******", channels=["ABC"])
    traun = admin.register_user(target=sgs[0], db="db", name="traun", password="******", channels=["ABC"])

    seth.add_docs(num_docs_seth)  # ABC
    adam.add_docs(num_docs_adam, bulk=True)  # ABC
    traun.add_docs(num_docs_traun, bulk=True)  # ABC

    assert len(seth.cache) == num_docs_seth
    assert len(adam.cache) == num_docs_adam
    assert len(traun.cache) == num_docs_traun

    # discuss appropriate time with team
    time.sleep(10)

    # Each user should get all docs from all users
    all_docs = {}
    for cache in (seth.cache, adam.cache, traun.cache):
        all_docs.update(cache)

    total_docs = num_docs_seth + num_docs_adam + num_docs_traun
    verify_changes([seth, adam, traun], expected_num_docs=total_docs, expected_num_revisions=0, expected_docs=all_docs)
def test_seq(params_from_base_test_setup, sg_conf_name, num_users, num_docs, num_revisions):
    """
    Verify that _changes can be issued with a compound 'since' value taken from the
    middle of a user's changes feed, and that the resulting 'seq' values have the
    expected shape for each mode.
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running seq")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_users: {}".format(num_users))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])

    # all users will share docs due to having the same channel
    users = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db", name_prefix="user", number=num_users, password="******", channels=["ABC"])

    for user in users:
        user.add_docs(num_docs, bulk=True)

    for user in users:
        user.update_docs(num_revisions)

    time.sleep(5)

    user_0_changes = users[0].get_changes(since=0)
    # Floor division keeps the index an int under Python 3 as well
    # ('/' on ints yields a float there, which is not a valid list index).
    doc_seq = user_0_changes["results"][num_docs // 2]["seq"]

    # https://github.com/couchbase/sync_gateway/issues/1475#issuecomment-172426052
    # verify you can issue _changes with since=12313-0::1023.15
    for user in users:
        changes = user.get_changes(since=doc_seq)
        log_info("Trying changes with since={}".format(doc_seq))
        assert len(changes["results"]) > 0

        second_to_last_doc_entry_seq = changes["results"][-2]["seq"]
        last_doc_entry_seq = changes["results"][-1]["seq"]

        log_info('Second to last doc "seq": {}'.format(second_to_last_doc_entry_seq))
        log_info('Last doc "seq": {}'.format(last_doc_entry_seq))

        if mode == "di":
            # Verify last "seq" follows the format 12313-0, not 12313-0::1023.15
            log_info('Verify that the last "seq" is a plain hashed value')
            assert len(second_to_last_doc_entry_seq.split("::")) == 2
            assert len(last_doc_entry_seq.split("::")) == 1
        elif mode == "cc":
            assert second_to_last_doc_entry_seq > 0
            assert last_doc_entry_seq > 0
        else:
            raise ValueError("Unsupported 'mode' !!")

    all_doc_caches = [user.cache for user in users]
    all_docs = {k: v for cache in all_doc_caches for k, v in cache.items()}
    verify_changes(users, expected_num_docs=num_users * num_docs, expected_num_revisions=num_revisions, expected_docs=all_docs)
def test_rolling_ttl_remove_expirary(params_from_base_test_setup, sg_conf_name):
    """
    1. PUT /db/doc1 via SG with property "_exp":3
    2. Once per second for 10 seconds, update /db/doc1 with a new revision (also with "_exp":3)
    3. Update /db/doc1 with a revision with no expiry
    4. Get /db/doc1. Assert response is 200
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)

    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]

    log_info("Running 'test_rolling_ttl_remove_expirary'")
    log_info("cbs_url: {}".format(cbs_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC", "ABC"]

    client = MobileRestClient()
    client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
    session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)

    # One doc with a rolling 3s expiry, plus a 10s control doc
    doc_exp_3_body = document.create_doc(doc_id="exp_3", expiry=3, channels=sg_user_channels)
    doc_exp_10_body = document.create_doc(doc_id="exp_10", expiry=10, channels=sg_user_channels)

    doc_exp_3 = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_3_body, auth=session)
    doc_exp_10 = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_10_body, auth=session)

    # Push the 3s expiry forward once per second for 10 updates...
    client.update_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3["id"], number_updates=10, expiry=3, delay=1, auth=session)
    # ...then write one final revision carrying no expiry at all
    client.update_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3["id"], number_updates=1, auth=session)

    # If expiry was not removed in the last update, this would expire doc_exp_3
    time.sleep(5)

    # doc_exp_3 should still be around due to removal of expiry
    doc_exp_3 = client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3["id"], auth=session)
    assert doc_exp_3["_id"] == "exp_3"

    # doc_exp_10 should be expired due to the updates (10s) + sleep (5s)
    with pytest.raises(HTTPError) as he:
        client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_10["id"], auth=session)
    assert he.value[0].startswith("404 Client Error: Not Found for url:")
def test_rolling_ttl_expires(params_from_base_test_setup, sg_conf_name):
    """
    1. PUT /db/doc1 via SG with property "_exp":3
    2. Update /db/doc1 10 times with a new revision (also with "_exp":3)
    3. Wait 5 seconds
    4. Get /db/doc1. Assert the doc has expired (404); the 10s control doc is still present
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)

    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]

    log_info("Running 'test_rolling_ttl_expires'")
    log_info("cbs_url: {}".format(cbs_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC", "ABC"]

    client = MobileRestClient()
    client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
    session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)

    # Rolling 3s expiry doc plus a 10s control doc
    doc_exp_3_body = document.create_doc(doc_id="exp_3", expiry=3, channels=sg_user_channels)
    doc_exp_10_body = document.create_doc(doc_id="exp_10", expiry=10, channels=sg_user_channels)

    doc_exp_3 = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_3_body, auth=session)
    doc_exp_10 = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_10_body, auth=session)

    # Every update re-stamps the 3 second expiry; none of them removes it
    client.update_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3["id"], number_updates=10, expiry=3, auth=session)

    # Sleep should allow doc_exp_3 to expire, but still be in the window to get doc_exp_10
    time.sleep(5)

    # doc_exp_3 should be expired
    with pytest.raises(HTTPError) as he:
        client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3["id"], auth=session)
    assert he.value[0].startswith("404 Client Error: Not Found for url:")

    # doc_exp_10 should be available still
    doc_exp_10_result = client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_10["id"], auth=session)
    assert doc_exp_10_result["_id"] == "exp_10"
def test_db_online_offline_webhooks_offline_two(params_from_base_test_setup, sg_conf_name, num_users, num_channels, num_docs, num_revisions):
    """Deleting the backing bucket should eventually deliver an 'offline' webhook event."""
    start = time.time()

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'test_db_online_offline_webhooks_offline_two'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using num_users: {}".format(num_users))
    log_info("Using num_channels: {}".format(num_channels))
    log_info("Using num_docs: {}".format(num_docs))
    log_info("Using num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_conf)

    init_completed = time.time()
    log_info("Initialization completed. Time taken:{}s".format(init_completed - start))

    channels = ["channel-" + str(i) for i in range(num_channels)]
    password = "******"

    webhook_server = WebServer()
    webhook_server.start()

    gateways = cluster.sync_gateways
    admin = Admin(gateways[0])

    # Register User
    log_info("Register User")
    users = admin.register_bulk_users(target=gateways[0], db="db", name_prefix="User", number=num_users, password=password, channels=channels)

    # Add User
    log_info("Add docs")
    in_parallel(users, 'add_docs', num_docs)

    # Update docs
    log_info("Update docs")
    in_parallel(users, 'update_docs', num_revisions)
    time.sleep(10)

    # Sever the feed by deleting the bucket, then wait for the webhook to fire
    status = cluster.servers[0].delete_bucket("data-bucket")
    assert status == 0

    log_info("Sleeping for 120 seconds...")
    time.sleep(120)

    webhook_events = webhook_server.get_data()
    time.sleep(5)
    log_info("webhook event {}".format(webhook_events))

    # The final delivered event should be the db going offline
    last_event = webhook_events[-1]
    assert last_event['state'] == 'offline'

    webhook_server.stop()
def test_writing_attachment_to_couchbase_server(params_from_base_test_setup, sg_conf_name):
    """
    1. Create a user and add 100 docs through Sync Gateway
    2. Push a doc that carries an attachment
    3. Use the CBS SDK to list docs with the attachment prefix.
       Attachment doc IDs look like _sync:att:sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=
       (where the suffix is the digest)
    4. Assert exactly one attachment doc was written to the bucket
    """
    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    cluster_helper.reset_cluster(cluster_config, sg_conf)

    topology = cluster_helper.get_cluster_topology(cluster_config)

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    bucket = "data-bucket"

    log_info("Running 'test_writing_attachment_to_couchbase_server'")
    log_info("Using cbs_url: {}".format(cbs_url))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_url_admin: {}".format(sg_url_admin))
    log_info("Using sg_db: {}".format(sg_db))
    log_info("Using bucket: {}".format(bucket))

    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC"]

    client = MobileRestClient()
    client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
    session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)

    docs = client.add_docs(url=sg_url, db=sg_db, number=100, id_prefix=sg_db, channels=sg_user_channels, auth=session)
    assert len(docs) == 100

    # Create doc with attachment and push to sync_gateway
    doc_with_att = document.create_doc(doc_id="att_doc", content={"sample_key": "sample_val"}, attachment_name="sample_text.txt", channels=sg_user_channels)
    client.add_doc(url=sg_url, db=sg_db, doc=doc_with_att, auth=session)

    # Assert that the attachment doc gets written to couchbase server
    server = CouchbaseServer(cbs_url)
    server_att_docs = server.get_server_docs_with_prefix(bucket=bucket, prefix="_sync:att:")
    assert len(server_att_docs) == 1
def test_removing_expiry(params_from_base_test_setup, sg_conf_name):
    """
    1. PUT /db/doc1 via SG with property "_exp":3
    2. Update /db/doc1 with new revisions that carry no expiry value
    3. After 10 updates, verify the doc is still retrievable (expiry was removed)
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)

    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]

    log_info("Running 'test_removing_expiry'")
    log_info("cbs_url: {}".format(cbs_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC", "ABC"]

    client = MobileRestClient()
    client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
    session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)

    # One doc whose expiry should be dropped by subsequent updates,
    # one control doc whose 10s expiry outlives the sleep below
    doc_exp_3_body = document.create_doc(doc_id="exp_3", expiry=3, channels=sg_user_channels)
    doc_exp_10_body = document.create_doc(doc_id="exp_10", expiry=10, channels=sg_user_channels)

    doc_exp_3 = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_3_body, auth=session)
    doc_exp_10 = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_10_body, auth=session)

    # The updates carry no expiry, which clears the TTL on the doc
    doc_exp_3_updated = client.update_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3["id"], number_updates=10, auth=session)

    # Sleep should allow an expiry to happen on doc_exp_3 if it had not been removed.
    # Expected behavior is that the doc_exp_3 will still be around due to the removal of the expiry
    time.sleep(5)

    # doc_exp_3 should no longer have an expiry and should not raise an exception
    doc_exp_3_updated_result = client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3_updated["id"], auth=session)
    assert doc_exp_3_updated_result["_id"] == "exp_3"

    # doc_exp_10 should be available still and should not raise an exception
    doc_exp_10_result = client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_10["id"], auth=session)
    assert doc_exp_10_result["_id"] == "exp_10"
def test_setting_expiry_in_bulk_docs(params_from_base_test_setup, sg_conf_name):
    """
    1. PUT /db/_bulk_docs with 10 documents. Set the "_exp":3 on 5 of these documents
    2. Wait five seconds
    3. POST /db/_bulk_get for the 10 documents. Validate that only the 5 non-expiring documents are returned
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)

    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]

    log_info("Running 'test_setting_expiry_in_bulk_docs'")
    log_info("cbs_url: {}".format(cbs_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC", "ABC"]

    client = MobileRestClient()

    client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)

    # 5 docs with a 3s expiry and 5 docs with a 10s expiry, pushed in one _bulk_docs request
    doc_exp_3_bodies = document.create_docs(doc_id_prefix="exp_3", number=5, expiry=3, channels=sg_user_channels)
    doc_exp_10_bodies = document.create_docs(doc_id_prefix="exp_10", number=5, expiry=10, channels=sg_user_channels)

    bulk_bodies = doc_exp_3_bodies + doc_exp_10_bodies

    bulk_docs = client.add_bulk_docs(url=sg_url, db=sg_db, docs=bulk_bodies, auth=sg_user_session)

    # Allow exp_3 docs to expire
    time.sleep(5)

    bulk_get_docs = client.get_bulk_docs(url=sg_url, db=sg_db, docs=bulk_docs, auth=sg_user_session)

    # Only the 10s-expiry docs should still be retrievable after the sleep
    expected_ids = ["exp_10_0", "exp_10_1", "exp_10_2", "exp_10_3", "exp_10_4"]
    expected_missing_ids = ["exp_3_0", "exp_3_1", "exp_3_2", "exp_3_3", "exp_3_4"]

    client.verify_doc_ids_found_in_response(response=bulk_get_docs, expected_doc_ids=expected_ids)
    client.verify_doc_ids_not_found_in_response(response=bulk_get_docs, expected_missing_doc_ids=expected_missing_ids)
def test_openidconnect_large_scope(params_from_base_test_setup, sg_conf_name): """Authenticate against a test provider config that only has a larger scope than the default, and make sure things like the nickname are returned in the jwt token returned back""" cluster_config = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode) cluster_helper = ClusterKeywords() topology = cluster_helper.get_cluster_topology(cluster_config) sg_url = topology["sync_gateways"][0]["public"] sg_db = "db" log_info("Running 'test_openidconnect_large_scope'") log_info("Using cluster_config: {}".format(cluster_config)) log_info("Using sg_url: {}".format(sg_url)) log_info("Using sg_db: {}".format(sg_db)) cluster_helper = ClusterKeywords() cluster_helper.reset_cluster( cluster_config=cluster_config, sync_gateway_config=sg_conf ) # multipart/form data content formdata = { 'username': ('', 'testuser'), 'authenticated': ('', 'Return a valid authorization code for this user') } # get the authenticate endpoint and query params, should look something like: # authenticate?client_id=sync_gateway&redirect_uri= ... authenticate_endpoint = discover_authenticate_endpoint(sg_url, sg_db, "testlargescope") # build the full url url = "{}/{}/_oidc_testing/{}".format( sg_url, sg_db, authenticate_endpoint ) # Make the request to _oidc_testing response = requests.post(url, files=formdata) log_r(response) # extract the token from the response response_json = response.json() id_token = response_json["id_token"] # {u'iss': u'http://localhost:4984/db/_oidc_testing', u'iat': 1466050188, u'aud': u'sync_gateway', u'exp': 1466053788, u'sub': u'testuser'} decoded_id_token = jwt.decode(id_token, verify=False) log_info("decoded_id_token: {}".format(decoded_id_token)) assert "nickname" in decoded_id_token.keys()
def test_sync_channel_sanity(params_from_base_test_setup, sg_conf_name):
    """Sanity check of channel routing via the sync function.

    Pushes docs in several channels, verifies the sync function redirects them
    away from the pushers, then checks a subscriber on the redirected channel
    receives them, updates them, and finally sees removal notifications.
    """

    num_docs_per_channel = 100
    channels = ["ABC", "NBC", "CBS"]

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'sync_channel_sanity'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])

    doc_pushers = []
    doc_pusher_caches = []
    # Push some ABC docs
    for channel in channels:
        doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="{}_doc_pusher".format(channel), password="******", channels=[channel])
        doc_pusher.add_docs(num_docs_per_channel, bulk=True)

        doc_pushers.append(doc_pusher)
        doc_pusher_caches.append(doc_pusher.cache)

    # Verify that none of the doc_pushers get docs. They should all be redirected by the sync function
    verify_changes(doc_pushers, expected_num_docs=0, expected_num_revisions=0, expected_docs={})

    # Subscriber on the channel the sync function redirects docs into
    subscriber = admin.register_user(target=cluster.sync_gateways[0], db="db", name="subscriber", password="******", channels=["tv_station_channel"])

    # Allow docs to backfill
    time.sleep(20)

    # subscriber should receive all docs
    all_docs = {k: v for cache in doc_pusher_caches for k, v in cache.items()}
    verify_changes(subscriber, expected_num_docs=len(channels) * num_docs_per_channel, expected_num_revisions=0, expected_docs=all_docs)

    # update subscribers cache so the user knows what docs to update
    subscriber.cache = all_docs
    subscriber.update_docs(num_revs_per_doc=1)

    # Allow docs to backfill
    time.sleep(20)

    # Verify the docs are back in the respective ABC, NBC, CBS channels
    # HACK: Ignoring rev_id verification due to the fact that the doc was updated by the subscriber user and not the
    # doc_pusher
    for doc_pusher in doc_pushers:
        verify_changes(doc_pusher, expected_num_docs=num_docs_per_channel, expected_num_revisions=1, expected_docs=doc_pusher.cache, ignore_rev_ids=True)

    # Verify that all docs have been flagged with _removed = true in changes feed for subscriber
    verify_docs_removed(subscriber, expected_num_docs=len(all_docs.items()), expected_docs=all_docs)
def test_load_balance_sanity(params_from_base_test_setup): cluster_config = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] sg_conf_name = "sync_gateway_default_functional_tests" sg_conf_path = sync_gateway_config_path_for_mode(sg_conf_name, mode) cluster_util = ClusterKeywords() cluster_util.reset_cluster( cluster_config=cluster_config, sync_gateway_config=sg_conf_path ) topology = cluster_util.get_cluster_topology(cluster_config) admin_sg_one = topology["sync_gateways"][0]["admin"] lb_url = "{}:4984".format(topology["load_balancers"][0]) sg_db = "db" num_docs = 1000 sg_user_name = "seth" sg_user_password = "******" channels = ["ABC", "CBS"] client = MobileRestClient() user = client.create_user(admin_sg_one, sg_db, sg_user_name, sg_user_password, channels=channels) session = client.create_session(admin_sg_one, sg_db, sg_user_name) log_info(user) log_info(session) log_info("Adding docs to the load balancer ...") ct = ChangesTracker(url=lb_url, db=sg_db, auth=session) with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor: log_info("Starting ...") ct_task = executor.submit(ct.start) log_info("Adding docs ...") docs = client.add_docs(lb_url, sg_db, num_docs, "test_doc", channels=channels, auth=session) assert len(docs) == num_docs log_info("Adding docs done") wait_for_changes = executor.submit(ct.wait_until, docs) if wait_for_changes.result(): log_info("Stopping ...") log_info("Found all docs ...") executor.submit(ct.stop) ct_task.result() else: executor.submit(ct.stop) ct_task.result() raise Exception("Could not find all changes in feed before timeout!!")
def test_load_balance_sanity(params_from_base_test_setup): cluster_config = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] sg_conf_name = "sync_gateway_default_functional_tests" sg_conf_path = sync_gateway_config_path_for_mode(sg_conf_name, mode) cluster_util = ClusterKeywords() cluster_util.reset_cluster( cluster_config=cluster_config, sync_gateway_config=sg_conf_path ) topology = cluster_util.get_cluster_topology(cluster_config) admin_sg_one = topology["sync_gateways"][0]["admin"] lb_url = topology["load_balancers"][0] sg_db = "db" num_docs = 1000 sg_user_name = "seth" sg_user_password = "******" channels = ["ABC", "CBS"] client = MobileRestClient() user = client.create_user(admin_sg_one, sg_db, sg_user_name, sg_user_password, channels=channels) session = client.create_session(admin_sg_one, sg_db, sg_user_name) log_info(user) log_info(session) log_info("Adding docs to the load balancer ...") ct = ChangesTracker(url=lb_url, db=sg_db, auth=session) with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor: log_info("Starting ...") ct_task = executor.submit(ct.start) log_info("Adding docs ...") docs = client.add_docs(lb_url, sg_db, num_docs, "test_doc", channels=channels, auth=session) assert len(docs) == num_docs log_info("Adding docs done") wait_for_changes = executor.submit(ct.wait_until, docs) if wait_for_changes.result(): log_info("Stopping ...") log_info("Found all docs ...") executor.submit(ct.stop) ct_task.result() else: executor.submit(ct.stop) ct_task.result() raise Exception("Could not find all changes in feed before timeout!!")
def test_no_conflicts_enabled(params_from_base_test_setup, sg_conf_name, num_of_docs):
    """ @summary : Enable no conflicts and verify conflicts are not created
    Test case link : https://docs.google.com/spreadsheets/d/1YwI_gCeoBebQKBybkzoAEoXSc0XLReszDA-mPFQapgk/edit#gid=0
    covered #3
    Steps:
    1. Enable allow_conflicts = false in SG config
    2. Add docs to SG.
    3. Update the docs few times.
    4. Try to create a conflict.
    5. Check the revision list for the doc
    6. Verify no conflicts can be created.
    7. Sync Gateway should respond with a 409 upon trying to add a conflict
    8. Should be possible to add more than 1 revision (Default revs_limit = 1000)
    """

    # Setup
    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    no_conflicts_enabled = params_from_base_test_setup["no_conflicts_enabled"]
    sg_db = "db"

    if not no_conflicts_enabled:
        pytest.skip('--no-conflicts is not enabled, so skipping the test')

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    c = cluster.Cluster(cluster_config)
    c.reset(sg_conf)

    sg_client = MobileRestClient()
    channels = ["no-conflicts"]
    sg_client.create_user(url=sg_admin_url, db=sg_db, name='autotest', password='******', channels=channels)
    autouser_session = sg_client.create_session(url=sg_admin_url, db=sg_db, name='autotest', password='******')
    # end of Set up

    # 2. Add docs to SG.
    sgdoc_bodies = document.create_docs(doc_id_prefix='sg_docs', number=num_of_docs, attachments_generator=attachment.generate_2_png_10_10, channels=channels)
    sg_docs = sg_client.add_bulk_docs(url=sg_url, db=sg_db, docs=sgdoc_bodies, auth=autouser_session)
    assert len(sgdoc_bodies) == num_of_docs

    # 3. Update the docs few times
    sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=3, delay=None, auth=autouser_session, channels=channels)

    # 4. Try to create a conflict -- each attempt must be rejected with a 409
    for doc in sg_docs:
        with pytest.raises(HTTPError) as he:
            sg_client.add_conflict(url=sg_url, db=sg_db, doc_id=doc["id"], parent_revisions=doc["rev"], new_revision="2-foo", auth=autouser_session)
        # NOTE(review): exception .message is Python 2 only -- use str(he.value) under Python 3
        assert he.value.message.startswith('409 Client Error: Conflict for url:')

    # 6. Update the docs 1 more time (more than 1 revision must still be allowed)
    sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=1, delay=None, auth=autouser_session, channels=channels)
def test_sg_replicate_push_async(params_from_base_test_setup, num_docs): assert num_docs > 0 # if the async stuff works, we should be able to kick off a large # push replication and get a missing doc before the replication has # a chance to finish. And then we should later see that doc. cluster_config = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] log_info("Running 'test_sg_replicate_push_async'") log_info("Using cluster_config: {}".format(cluster_config)) config = sync_gateway_config_path_for_mode("sync_gateway_sg_replicate", mode) sg1, sg2 = create_sync_gateways( cluster_config=cluster_config, sg_config_path=config ) admin = Admin(sg1) admin.admin_url = sg1.url sg1_user, sg2_user = create_sg_users(sg1, sg2, DB1, DB2) # Add docs to sg1 doc_ids_added = [] last_doc_id_added = None for i in xrange(num_docs): doc_id = sg1_user.add_doc() doc_ids_added.append(doc_id) last_doc_id_added = doc_id # Wait until doc shows up on sg1's changes feed wait_until_doc_in_changes_feed(sg1, DB1, last_doc_id_added) # try to get the last doc added from the target -- assert that we get an exception assert_does_not_have_doc(sg2_user, last_doc_id_added) # kick off a one-off push replication with async=true sg1.start_push_replication( sg2.admin.admin_url, DB1, DB2, continuous=False, use_remote_source=True, async=True, use_admin_url=True ) # wait until that doc shows up on the target wait_until_doc_sync(sg2_user, last_doc_id_added) # At this point, the active tasks should be empty wait_until_active_tasks_empty(sg1)
def test_online_to_offline_longpoll_changes_feed_controlled_close_sanity_mulitple_users(params_from_base_test_setup, sg_conf_name, num_docs, num_users):
    """Verify that taking the db offline cleanly closes open longpoll _changes
    feeds for multiple concurrent users (each feed returns rather than hanging).
    """

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))
    log_info("Using num_users: {}".format(num_users))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])

    users = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db", name_prefix="user", password="******", number=num_users, channels=["ABC"])

    feed_close_results = list()

    with concurrent.futures.ThreadPoolExecutor(max_workers=libraries.testkit.settings.MAX_REQUEST_WORKERS) as executor:
        # start longpoll tracking with no timeout, will block until longpoll is closed by db going offline
        futures = {executor.submit(user.start_longpoll_changes_tracking, termination_doc_id=None, timeout=0, loop=False): user.name for user in users}
        time.sleep(5)
        futures[executor.submit(admin.take_db_offline, "db")] = "db_offline_task"

        for future in concurrent.futures.as_completed(futures):
            task_name = futures[future]

            if task_name == "db_offline_task":
                log_info("DB OFFLINE")
                # make sure db_offline returns 200
                assert future.result() == 200
            if task_name.startswith("user"):
                # Long poll will exit with 503, return docs in the exception
                log_info("POLLING DONE")
                try:
                    docs_in_changes, last_seq_num = future.result()
                    feed_close_results.append((docs_in_changes, last_seq_num))
                except Exception as e:
                    log_info("Longpoll feed close error: {}".format(e))
                    # long poll should be closed so this exception should never happen
                    assert 0

    # Assert that the feed close results length is num_users
    assert len(feed_close_results) == num_users

    # Account for _user doc
    # last_seq may be of the form '1' for channel cache or '1-0' for distributed index
    for feed_result in feed_close_results:
        docs_in_changes = feed_result[0]
        seq_num_component = feed_result[1].split("-")
        assert len(docs_in_changes) == 0
        assert int(seq_num_component[0]) > 0
def test_single_user_single_channel_doc_updates(params_from_base_test_setup, sg_conf_name, num_docs, num_revisions): cluster_conf = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode) log.info("Running 'single_user_single_channel_doc_updates'") log.info("cluster_conf: {}".format(cluster_conf)) log.info("sg_conf: {}".format(sg_conf)) log.info("num_docs: {}".format(num_docs)) log.info("num_revisions: {}".format(num_revisions)) start = time.time() cluster = Cluster(config=cluster_conf) cluster.reset(sg_config_path=sg_conf) num_docs = num_docs num_revisions = num_revisions username = "******" password = "******" channels = ["channel-1"] sgs = cluster.sync_gateways admin = Admin(sgs[0]) single_user = admin.register_user(target=sgs[0], db="db", name=username, password=password, channels=channels) # Not using bulk docs single_user.add_docs(num_docs, name_prefix="test-") assert len(single_user.cache) == num_docs # let SG catch up with all the changes time.sleep(5) single_user.update_docs(num_revisions) time.sleep(10) verify_changes([single_user], expected_num_docs=num_docs, expected_num_revisions=num_revisions, expected_docs=single_user.cache) end = time.time() log.info("TIME:{}s".format(end - start))
def test_sync_sanity_backfill(params_from_base_test_setup, sg_conf_name): cluster_conf = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode) log_info("Running 'sync_sanity_backfill'") log_info("Using cluster_conf: {}".format(cluster_conf)) log_info("Using sg_conf: {}".format(sg_conf)) cluster = Cluster(config=cluster_conf) cluster.reset(sg_config_path=sg_conf) radio_stations = ["KMOW", "HWOD", "KDWB"] number_of_docs_per_pusher = 5000 admin = Admin(cluster.sync_gateways[0]) dj_0 = admin.register_user(target=cluster.sync_gateways[0], db="db", name="dj_0", password="******") kdwb_caches = [] for radio_station in radio_stations: doc_pusher = admin.register_user( target=cluster.sync_gateways[0], db="db", name="{}_doc_pusher".format(radio_station), password="******", channels=[radio_station]) doc_pusher.add_docs(number_of_docs_per_pusher, bulk=True) if doc_pusher.name == "KDWB_doc_pusher": kdwb_caches.append(doc_pusher.cache) access_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="access_doc_pusher", password="******") # Grant dj_0 access to KDWB channel via sync after docs are pushed access_doc_pusher.add_doc("access_doc", content="access") # Build global doc_id, rev dict for all docs from all KDWB caches kdwb_docs = {k: v for cache in kdwb_caches for k, v in cache.items()} # wait for changes time.sleep(5) verify_changes(dj_0, expected_num_docs=number_of_docs_per_pusher, expected_num_revisions=0, expected_docs=kdwb_docs)
def test_log_rotation_invalid_path(params_from_base_test_setup, sg_conf_name): """Test to check that SG is not started with invalid logFilePath. OS specific case. SG should check if path correct on startup """ cluster_conf = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] xattrs_enabled = params_from_base_test_setup["xattrs_enabled"] sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode) log_info("Using cluster_conf: {}".format(cluster_conf)) log_info("Using sg_conf: {}".format(sg_conf)) cluster = Cluster(config=cluster_conf) cluster.reset(sg_config_path=sg_conf) cluster_helper = ClusterKeywords() cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf) sg_one_url = cluster_hosts["sync_gateways"][0]["public"] # read sample sg_conf data = load_sync_gateway_config(sg_conf, mode, cluster_hosts["couchbase_servers"][0], xattrs_enabled, cluster_conf) # set non existing logFilePath data['logging']["default"]["logFilePath"] = "/12345/1231/131231.log" # create temp config file in the same folder as sg_conf temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json' with open(temp_conf, 'w') as fp: json.dump(data, fp) # Stop sync_gateways log_info(">>> Stopping sync_gateway") sg_helper = SyncGateway() sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url) try: sg_helper.start_sync_gateways(cluster_config=cluster_conf, url=sg_one_url, config=temp_conf) except ProvisioningError: sg_helper.start_sync_gateways(cluster_config=cluster_conf, url=sg_one_url, config=sg_conf) # Remove generated conf file os.remove(temp_conf) return # Remove generated conf file os.remove(temp_conf) pytest.fail("SG shouldn't be started!!!!")
def test_multiple_users_multiple_channels(params_from_base_test_setup, sg_conf_name):
    """Three users with overlapping channel grants each push docs; verify every
    user's changes feed contains exactly the union of docs from the channels
    that user can see.
    """

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'multiple_users_multiple_channels'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    num_docs_seth = 1000
    num_docs_adam = 2000
    num_docs_traun = 3000

    sgs = cluster.sync_gateways

    admin = Admin(sgs[0])

    seth = admin.register_user(target=sgs[0], db="db", name="seth", password="******", channels=["ABC"])
    adam = admin.register_user(target=sgs[0], db="db", name="adam", password="******", channels=["NBC", "CBS"])
    traun = admin.register_user(target=sgs[0], db="db", name="traun", password="******", channels=["ABC", "NBC", "CBS"])

    # TODO use bulk docs
    seth.add_docs(num_docs_seth)  # ABC
    adam.add_docs(num_docs_adam)  # NBC, CBS
    traun.add_docs(num_docs_traun)  # ABC, NBC, CBS

    assert len(seth.cache) == num_docs_seth
    assert len(adam.cache) == num_docs_adam
    assert len(traun.cache) == num_docs_traun

    # discuss appropriate time with team
    time.sleep(10)

    # Seth should get docs from seth + traun
    seth_subset = [seth.cache, traun.cache]
    seth_expected_docs = {k: v for cache in seth_subset for k, v in cache.items()}
    verify_changes([seth], expected_num_docs=num_docs_seth + num_docs_traun, expected_num_revisions=0, expected_docs=seth_expected_docs)

    # Adam should get docs from adam + traun
    adam_subset = [adam.cache, traun.cache]
    adam_expected_docs = {k: v for cache in adam_subset for k, v in cache.items()}
    verify_changes([adam], expected_num_docs=num_docs_adam + num_docs_traun, expected_num_revisions=0, expected_docs=adam_expected_docs)

    # Traun should get docs from seth + adam + traun
    traun_subset = [seth.cache, adam.cache, traun.cache]
    traun_expected_docs = {k: v for cache in traun_subset for k, v in cache.items()}
    verify_changes([traun], expected_num_docs=num_docs_seth + num_docs_adam + num_docs_traun, expected_num_revisions=0, expected_docs=traun_expected_docs)
def test_revs_cache_size(params_from_base_test_setup, sg_conf_name, num_of_docs):
    """ @summary Test for no-conflicts with rev_cache size
    Test case link : https://docs.google.com/spreadsheets/d/1YwI_gCeoBebQKBybkzoAEoXSc0XLReszDA-mPFQapgk/edit#gid=0
    covered #18
    Steps:
    Note : the sg config have rev_cache_size as 1000 , make sure number of docs is less than 1000
    to have the test work with expected behavior
    1. Add docs to SG.
    2. Get the docs
    3. Verify number of rev_cache_hits is same as number of docs if rev_cache_size is more than number of docs.
    """

    # Setup
    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    # Only fetch half the docs; floor division keeps this an int on Python 2 and 3
    retrieved_docs = num_of_docs // 2

    sync_gateway_version = params_from_base_test_setup["sync_gateway_version"]
    if sync_gateway_version < "2.0":
        pytest.skip('It does not work with sg < 2.0 , so skipping the test')

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    c = cluster.Cluster(cluster_config)
    c.reset(sg_conf)

    sg_client = MobileRestClient()
    channels = ["no-conflicts"]
    sg_client.create_user(url=sg_admin_url, db=sg_db, name='autotest', password='******', channels=channels)
    autouser_session = sg_client.create_session(url=sg_admin_url, db=sg_db, name='autotest', password='******')
    # end of Set up

    # 2. Add docs to SG.
    sgdoc_bodies = document.create_docs(doc_id_prefix="sg_docs", number=num_of_docs, channels=channels)
    sg_docs = sg_client.add_bulk_docs(url=sg_url, db=sg_db, docs=sgdoc_bodies, auth=autouser_session)
    assert len(sgdoc_bodies) == num_of_docs

    # 3. Get half of the docs (each GET should be served from the rev cache)
    for i in range(retrieved_docs):
        doc = sg_docs[i]
        sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc["id"], auth=autouser_session)

    # 4. Verify the number of cache hits equals the number of retrieved docs
    exp_vars = sg_client.get_expvars(url=sg_admin_url)
    revision_cache_hits = exp_vars["syncGateway_stats"]["revisionCache_hits"]
    revision_cache_misses = exp_vars["syncGateway_stats"]["revisionCache_misses"]
    # BUGFIX: the failure message previously formatted num_of_docs, but the
    # asserted expectation is retrieved_docs -- report the value actually compared
    assert revision_cache_hits == retrieved_docs, "Revision Cache hits did not hit with expected number {}".format(retrieved_docs)
    assert revision_cache_misses == 0, "Revision Cache misses is not 0"
def test_log_logLevel_invalid(params_from_base_test_setup, sg_conf_name): """Run SG with non existing logLevel value """ cluster_conf = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] xattrs_enabled = params_from_base_test_setup["xattrs_enabled"] sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode) log_info("Using cluster_conf: {}".format(cluster_conf)) log_info("Using sg_conf: {}".format(sg_conf)) cluster = Cluster(config=cluster_conf) cluster.reset(sg_config_path=sg_conf) cluster_helper = ClusterKeywords() cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf) sg_one_url = cluster_hosts["sync_gateways"][0]["public"] # read sample sg_conf data = load_sync_gateway_config(sg_conf, mode, cluster_hosts["couchbase_servers"][0], xattrs_enabled, cluster_conf) # 'debugFake' invalid value for logLevel data['logging']["default"]["logLevel"] = "debugFake" temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json' # create temp config file in the same folder as sg_conf with open(temp_conf, 'w') as fp: json.dump(data, fp) # Stop sync_gateways log_info(">>> Stopping sync_gateway") sg_helper = SyncGateway() sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url) try: sg_helper.start_sync_gateways(cluster_config=cluster_conf, url=sg_one_url, config=temp_conf) except ProvisioningError: sg_helper.start_sync_gateways(cluster_config=cluster_conf, url=sg_one_url, config=sg_conf) # Remove generated conf file os.remove(temp_conf) return # Remove generated conf file os.remove(temp_conf) pytest.fail("SG shouldn't be started!!!!")
def test_log_logKeys_string(params_from_base_test_setup, sg_conf_name): """Negative test to verify that we are not able start SG when logKeys is string """ cluster_conf = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] xattrs_enabled = params_from_base_test_setup["xattrs_enabled"] sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode) log_info("Using cluster_conf: {}".format(cluster_conf)) log_info("Using sg_conf: {}".format(sg_conf)) cluster = Cluster(config=cluster_conf) cluster.reset(sg_config_path=sg_conf) # read sample sg_conf cluster_helper = ClusterKeywords() cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf) sg_one_url = cluster_hosts["sync_gateways"][0]["public"] data = load_sync_gateway_config(sg_conf, mode, cluster_hosts["couchbase_servers"][0], xattrs_enabled, cluster_conf) # set logKeys as string in config file data['logging']["default"]["logKeys"] = "http" # create temp config file in the same folder as sg_conf temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json' with open(temp_conf, 'w') as fp: json.dump(data, fp) # Stop sync_gateways log_info(">>> Stopping sync_gateway") sg_helper = SyncGateway() sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url) try: sg_helper.start_sync_gateways(cluster_config=cluster_conf, url=sg_one_url, config=temp_conf) except ProvisioningError: sg_helper.start_sync_gateways(cluster_config=cluster_conf, url=sg_one_url, config=sg_conf) # Remove generated conf file os.remove(temp_conf) return # Remove generated conf file os.remove(temp_conf) pytest.fail("SG shouldn't be started!!!!")
def test_webhooks(params_from_base_test_setup, sg_conf_name, num_users, num_channels, num_docs, num_revisions): start = time.time() cluster_conf = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode) log_info("Running 'test_webhooks'") log_info("Using cluster_conf: {}".format(cluster_conf)) log_info("Using num_users: {}".format(num_users)) log_info("Using num_channels: {}".format(num_channels)) log_info("Using num_docs: {}".format(num_docs)) log_info("Using num_revisions: {}".format(num_revisions)) cluster = Cluster(config=cluster_conf) cluster.reset(sg_conf) init_completed = time.time() log_info("Initialization completed. Time taken:{}s".format(init_completed - start)) channels = ["channel-" + str(i) for i in range(num_channels)] password = "******" ws = WebServer() ws.start() sgs = cluster.sync_gateways admin = Admin(sgs[0]) # Register User log_info("Register User") user_objects = admin.register_bulk_users(target=sgs[0], db="db", name_prefix="User", number=num_users, password=password, channels=channels) # Add User log_info("Add docs") in_parallel(user_objects, 'add_docs', num_docs) # Update docs log_info("Update docs") in_parallel(user_objects, 'update_docs', num_revisions) time.sleep(30) ws.stop() expected_events = (num_users * num_docs * num_revisions) + (num_users * num_docs) received_events = len(ws.get_data()) log_info("expected_events: {} received_events {}".format(expected_events, received_events)) assert expected_events == received_events
def test_longpoll_changes_sanity(params_from_base_test_setup, sg_conf_name, num_docs, num_revisions):
    """Track a longpoll _changes feed while another user pushes and updates docs;
    a termination doc closes the feed, then the feed contents are verified
    against the pusher's cache.
    """

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running: 'longpoll_changes_sanity': {}".format(cluster_conf))
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])

    # seth watches both channels; the TERMINATE channel carries the kill doc
    seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth", password="******", channels=["ABC", "TERMINATE"])
    abc_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="abc_doc_pusher", password="******", channels=["ABC"])
    doc_terminator = admin.register_user(target=cluster.sync_gateways[0], db="db", name="doc_terminator", password="******", channels=["TERMINATE"])

    docs_in_changes = dict()

    with concurrent.futures.ThreadPoolExecutor(max_workers=libraries.testkit.settings.MAX_REQUEST_WORKERS) as executor:
        futures = dict()
        futures[executor.submit(seth.start_longpoll_changes_tracking, termination_doc_id="killpolling")] = "polling"
        futures[executor.submit(abc_doc_pusher.add_docs, num_docs)] = "doc_pusher"

        for future in concurrent.futures.as_completed(futures):
            task_name = futures[future]

            # Send termination doc to seth long poller
            if task_name == "doc_pusher":
                abc_doc_pusher.update_docs(num_revs_per_doc=num_revisions)

                # Allow time for changes to reach subscribers
                time.sleep(5)

                doc_terminator.add_doc("killpolling")
            elif task_name == "polling":
                docs_in_changes, seq_num = future.result()

    # Verify abc_docs_pusher gets the correct docs in changes feed
    verify_changes(abc_doc_pusher, expected_num_docs=num_docs, expected_num_revisions=num_revisions, expected_docs=abc_doc_pusher.cache)

    # Verify docs from seth continuous changes is the same as abc_docs_pusher's docs
    verify_same_docs(expected_num_docs=num_docs, doc_dict_one=docs_in_changes, doc_dict_two=abc_doc_pusher.cache)
def test_continuous_changes_parametrized(params_from_base_test_setup, sg_conf_name, num_users, num_docs, num_revisions):
    """Verify continuous _changes feeds for many users at once.

    `num_users` users subscribe to continuous changes feeds while one pusher
    adds `num_docs` docs and `num_revisions` updates per doc; each subscriber
    must see exactly the pusher's docs before a termination doc closes its feed.
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'continuous_changes_parametrized'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_users: {}".format(num_users))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])

    # Subscribers see ABC (the doc channel) and TERMINATE (the kill-doc channel)
    users = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db", name_prefix="user", number=num_users, password="******", channels=["ABC", "TERMINATE"])
    abc_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="abc_doc_pusher", password="******", channels=["ABC"])
    doc_terminator = admin.register_user(target=cluster.sync_gateways[0], db="db", name="doc_terminator", password="******", channels=["TERMINATE"])

    with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:
        # One continuous-changes tracker per user, plus one doc-pusher task;
        # the future->name mapping identifies which task completed below.
        futures = {executor.submit(user.start_continuous_changes_tracking, termination_doc_id="killcontinuous"): user.name for user in users}
        futures[executor.submit(abc_doc_pusher.add_docs, num_docs)] = "doc_pusher"

        for future in concurrent.futures.as_completed(futures):
            task_name = futures[future]

            # Send termination doc to seth continuous changes feed subscriber
            if task_name == "doc_pusher":
                errors = future.result()
                assert len(errors) == 0

                abc_doc_pusher.update_docs(num_revs_per_doc=num_revisions)

                # Give the revisions time to propagate to the subscribers
                # before terminating their feeds.
                time.sleep(10)

                doc_terminator.add_doc("killcontinuous")
            elif task_name.startswith("user"):
                # When the user has continuous _changes feed closed, return the
                # docs and verify the user got all the channel docs
                docs_in_changes = future.result()

                # Expect number of docs + the termination doc + _user doc
                verify_same_docs(expected_num_docs=num_docs, doc_dict_one=docs_in_changes, doc_dict_two=abc_doc_pusher.cache)

    # Expect number of docs + the termination doc
    verify_changes(abc_doc_pusher, expected_num_docs=num_docs, expected_num_revisions=num_revisions, expected_docs=abc_doc_pusher.cache)
def test_sg_replicate_basic_test_channels(params_from_base_test_setup): cluster_config = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] log_info("Running 'test_sg_replicate_basic_test_channels'") log_info("Using cluster_config: {}".format(cluster_config)) config = sync_gateway_config_path_for_mode("sync_gateway_sg_replicate", mode) sg1, sg2 = create_sync_gateways( cluster_config=cluster_config, sg_config_path=config ) admin = Admin(sg1) admin.admin_url = sg1.url sg1a_user, sg1b_user, sg2_user = create_sg_users_channels(sg1, sg2, DB1, DB2) # Add docs to sg1 in channel A and channel B doc_id_sg1a = sg1a_user.add_doc() doc_id_sg1b = sg1b_user.add_doc() # Wait until docs show up in changes feed wait_until_doc_in_changes_feed(sg1, DB1, doc_id_sg1a) wait_until_doc_in_changes_feed(sg1, DB1, doc_id_sg1b) # Make sure it doesn't appear on the target DB # even without starting a replication (which will # happen if the SG's are sharing a CB bucket) time.sleep(5) assert_does_not_have_doc(sg2_user, doc_id_sg1a) assert_does_not_have_doc(sg2_user, doc_id_sg1b) # Start a push replication sg1 -> sg2 chans = sg1a_user.channels sg1.start_push_replication( sg2.admin.admin_url, DB1, DB2, continuous=False, use_remote_source=True, channels=chans, use_admin_url=True ) # Verify that the doc added to sg1 made it to sg2 assert_has_doc(sg2_user, doc_id_sg1a)
def test_continuous_changes_sanity(params_from_base_test_setup, sg_conf_name, num_docs, num_revisions):
    """Sanity-check a single continuous _changes feed.

    seth tracks a continuous changes feed while a pusher writes docs and
    revisions into the shared channel; after a termination doc closes the
    feed, the docs seen must match the pusher's cache.
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'continuous_changes_sanity'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    sg_one = cluster.sync_gateways[0]
    admin = Admin(sg_one)

    seth = admin.register_user(target=sg_one, db="db", name="seth", password="******", channels=["ABC", "TERMINATE"])
    abc_doc_pusher = admin.register_user(target=sg_one, db="db", name="abc_doc_pusher", password="******", channels=["ABC"])
    doc_terminator = admin.register_user(target=sg_one, db="db", name="doc_terminator", password="******", channels=["TERMINATE"])

    docs_in_changes = dict()

    with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:
        futures = {
            executor.submit(seth.start_continuous_changes_tracking, termination_doc_id="killcontinuous"): "continuous",
            executor.submit(abc_doc_pusher.add_docs, num_docs): "doc_pusher",
        }

        for completed in concurrent.futures.as_completed(futures):
            label = futures[completed]

            if label == "doc_pusher":
                # Docs are in; push the revisions, let them propagate, then
                # send the kill doc to close seth's feed.
                abc_doc_pusher.update_docs(num_revs_per_doc=num_revisions)
                time.sleep(5)
                doc_terminator.add_doc("killcontinuous")
            elif label == "continuous":
                docs_in_changes = completed.result()

    # Pusher's docs/revisions are all present on the server
    verify_changes(abc_doc_pusher, expected_num_docs=num_docs, expected_num_revisions=num_revisions, expected_docs=abc_doc_pusher.cache)

    # Feed contents match the pusher's cache
    # (feed also carried the termination doc + _user doc)
    verify_same_docs(expected_num_docs=num_docs, doc_dict_one=docs_in_changes, doc_dict_two=abc_doc_pusher.cache)
def test_openidconnect_negative_token_expiry(params_from_base_test_setup, sg_conf_name): """Create a token with a negative expiry time and expect that authentication is not possible""" cluster_config = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode) cluster_helper = ClusterKeywords() topology = cluster_helper.get_cluster_topology(cluster_config) sg_url = topology["sync_gateways"][0]["public"] sg_db = "db" log_info("Running 'test_openidconnect_negative_token_expiry'") log_info("Using cluster_config: {}".format(cluster_config)) log_info("Using sg_url: {}".format(sg_url)) log_info("Using sg_db: {}".format(sg_db)) cluster_helper = ClusterKeywords() cluster_helper.reset_cluster( cluster_config=cluster_config, sync_gateway_config=sg_conf ) token_expiry_seconds = -5 # multipart/form data content formdata = { 'username': ('', 'testuser'), 'authenticated': ('', 'Return a valid authorization code for this user'), 'tokenttl': ('', "{}".format(token_expiry_seconds)), } # get the authenticate endpoint and query params, should look something like: # authenticate?client_id=sync_gateway&redirect_uri= ... authenticate_endpoint = discover_authenticate_endpoint(sg_url, sg_db, DEFAULT_PROVIDER) # build the full url url = "{}/{}/_oidc_testing/{}".format( sg_url, sg_db, authenticate_endpoint ) response = requests.post(url, files=formdata) assert response.status_code == 500
def test_multiple_db_single_data_bucket_single_index_bucket(params_from_base_test_setup, sg_conf_name, num_users, num_docs_per_user): cluster_conf = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode) log_info("Running 'multiple_db_unique_data_bucket_unique_index_bucket'") log_info("Using cluster_conf: {}".format(cluster_conf)) log_info("Using sg_conf: {}".format(sg_conf)) log_info("Using num_users: {}".format(num_users)) log_info("Using num_docs_per_user: {}".format(num_docs_per_user)) # 2 dbs share the same data and index bucket cluster = Cluster(config=cluster_conf) cluster.reset(sg_config_path=sg_conf) num_db_users = num_users num_db2_users = num_users num_docs_per_user = num_docs_per_user admin = Admin(cluster.sync_gateways[0]) db_one_users = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db", name_prefix="bulk_db_user", number=num_db_users, password="******", channels=["ABC"]) db_two_users = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db2", name_prefix="bulk_db2_user", number=num_db2_users, password="******", channels=["ABC"]) all_users = list(db_one_users) all_users.extend(db_two_users) assert len(all_users) == num_db_users + num_db2_users # Round robin num_sgs = len(cluster.sync_gateways) count = 1 for user in all_users: user.add_docs(num_docs_per_user, bulk=True) user.target = cluster.sync_gateways[(count + 1) % num_sgs] count += 1 time.sleep(10) # Get list of all docs from users caches cached_docs_from_all_users = {k: v for user in all_users for k, v in user.cache.items()} # Verify each user has all of the docs verify_changes(all_users, expected_num_docs=(num_users * 2) * num_docs_per_user, expected_num_revisions=0, expected_docs=cached_docs_from_all_users)
def test_log_nondefault_logKeys_set(params_from_base_test_setup, sg_conf_name): """Test to verify non default logKeys with any invalid area. SG should work even with non existing logging area (positive case) """ cluster_conf = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] xattrs_enabled = params_from_base_test_setup["xattrs_enabled"] sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode) log_info("Using cluster_conf: {}".format(cluster_conf)) log_info("Using sg_conf: {}".format(sg_conf)) cluster = Cluster(config=cluster_conf) cluster.reset(sg_config_path=sg_conf) # read sample sg_conf cluster_helper = ClusterKeywords() cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf) sg_one_url = cluster_hosts["sync_gateways"][0]["public"] data = load_sync_gateway_config(sg_conf, mode, cluster_hosts["couchbase_servers"][0], xattrs_enabled, cluster_conf) # "FAKE" not valid area in logging data['logging']["default"]["logKeys"] = ["HTTP", "FAKE"] # create temp config file in the same folder as sg_conf temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json' with open(temp_conf, 'w') as fp: json.dump(data, fp) # Stop sync_gateways log_info(">>> Stopping sync_gateway") sg_helper = SyncGateway() sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url) # Start sync_gateways sg_helper.start_sync_gateways(cluster_config=cluster_conf, url=sg_one_url, config=temp_conf) # Remove generated conf file os.remove(temp_conf)
def test_replication_config(params_from_base_test_setup): cluster_config = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] log_info("Running 'test_replication_config'") log_info("Using cluster_config: {}".format(cluster_config)) config = sync_gateway_config_path_for_mode("sync_gateway_sg_replicate_continuous", mode) sg1, sg2 = create_sync_gateways( cluster_config=cluster_config, sg_config_path=config ) # Wait until active_tasks is non empty wait_until_active_tasks_non_empty(sg1) pass
def test_invalid_revs_limit_with_allow_conflicts(params_from_base_test_setup, sg_conf_name, revs_limit): """ @summary Verify all borders of revs limit Test case in Excel sheet : https://docs.google.com/spreadsheets/d/1YwI_gCeoBebQKBybkzoAEoXSc0XLReszDA-mPFQapgk/edit#gid=0 Covered Test case #2 Steps: - Add a doc - Have allow_conflicts to true in sg config - Put revs_limit=1 or any number lower than 20 and restart sync-gateway - Verify it fails - change revs_limit=20 and start sync-gateway - Verify it starts without any error """ cluster_config = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] no_conflicts_enabled = params_from_base_test_setup["no_conflicts_enabled"] if no_conflicts_enabled: pytest.skip( '--no-conflicts is enabled, this test needs to create conflicts, so skipping the test' ) sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode) clust = cluster.Cluster(cluster_config) clust.reset(sg_conf) temp_cluster_config = copy_to_temp_conf(cluster_config, mode) persist_cluster_config_environment_prop(temp_cluster_config, 'revs_limit', revs_limit, property_name_check=False) status = clust.sync_gateways[0].restart(config=sg_conf, cluster_config=temp_cluster_config) assert status != 0, "Syncgateway started with revs limit 1 when no conflicts disabled" # Now change the revs_limit to 20 revs_limit = 20 persist_cluster_config_environment_prop(temp_cluster_config, 'revs_limit', revs_limit, property_name_check=False) status = clust.sync_gateways[0].restart(config=sg_conf, cluster_config=temp_cluster_config) assert status == 0, "Syncgateway did not start after revs_limit changed to 20"
def test_single_user_multiple_channels(params_from_base_test_setup, sg_conf_name): cluster_conf = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode) log_info("Running 'single_user_multiple_channels'") log_info("cluster_conf: {}".format(cluster_conf)) log_info("conf: {}".format(sg_conf)) cluster = Cluster(config=cluster_conf) cluster.reset(sg_config_path=sg_conf) start = time.time() sgs = cluster.sync_gateways admin = Admin(sgs[0]) seth = admin.register_user(target=sgs[0], db="db", name="seth", password="******", channels=["ABC", "CBS", "NBC", "FOX"]) # Round robin count = 1 num_sgs = len(cluster.sync_gateways) while count <= 5: seth.add_docs(1000, bulk=True) seth.target = cluster.sync_gateways[count % num_sgs] count += 1 log_info(seth) time.sleep(10) verify_changes(users=[seth], expected_num_docs=5000, expected_num_revisions=0, expected_docs=seth.cache) end = time.time() log_info("TIME:{}s".format(end - start))
def test_stop_replication_via_replication_id(params_from_base_test_setup): cluster_config = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] log_info("Running 'test_stop_replication_via_replication_id'") log_info("Using cluster_config: {}".format(cluster_config)) config = sync_gateway_config_path_for_mode("sync_gateway_sg_replicate", mode) sg1, sg2 = create_sync_gateways( cluster_config=cluster_config, sg_config_path=config ) # Create users (in order to add docs) sg1_user, sg2_user = create_sg_users(sg1, sg2, DB1, DB2) # Kick off continuous replication sg1.start_push_replication( sg2.admin.admin_url, DB1, DB2, continuous=True, use_remote_source=True, use_admin_url=True ) # Make sure there is one active task active_tasks = sg1.admin.get_active_tasks() assert len(active_tasks) == 1 active_task = active_tasks[0] # get the replication id from the active tasks replication_id = active_task["replication_id"] # stop the replication sg1.stop_replication_by_id(replication_id, use_admin_url=True) # verify that the replication is stopped active_tasks = sg1.admin.get_active_tasks() log_info("active_tasks after stop: {}".format(active_tasks)) assert len(active_tasks) == 0
def test_openidconnect_notauthenticated(params_from_base_test_setup, sg_conf_name): """Simulate a failed authentication and make sure no session is created""" cluster_config = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode) cluster_helper = ClusterKeywords() topology = cluster_helper.get_cluster_topology(cluster_config) sg_url = topology["sync_gateways"][0]["public"] sg_db = "db" log_info("Running 'test_openidconnect_notauthenticated'") log_info("Using cluster_config: {}".format(cluster_config)) log_info("Using sg_url: {}".format(sg_url)) log_info("Using sg_db: {}".format(sg_db)) cluster_helper = ClusterKeywords() cluster_helper.reset_cluster( cluster_config=cluster_config, sync_gateway_config=sg_conf ) # get the authenticate endpoint and query params, should look something like: # authenticate?client_id=sync_gateway&redirect_uri= ... authenticate_endpoint = discover_authenticate_endpoint(sg_url, sg_db, DEFAULT_PROVIDER) # build the full url authenticate_endpoint_url = "{}/{}/_oidc_testing/{}".format( sg_url, sg_db, authenticate_endpoint ) # Make the request to _oidc_testing formdata = { 'username': ('', 'testuser'), 'notauthenticated': ('', 'Return an authorization error for this user') } response = requests.post(authenticate_endpoint_url, files=formdata) assert response.status_code == 401
def test_bulk_get_compression(params_from_base_test_setup, sg_conf_name, num_docs, accept_encoding, x_accept_part_encoding, user_agent): cluster_config = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode) log_info("Running 'test_bulk_get_compression'") log_info("Using cluster_config: {}".format(cluster_config)) log_info("Using sg_conf: {}".format(sg_conf)) log_info("Using num_docs: {}".format(num_docs)) log_info("Using user_agent: {}".format(user_agent)) log_info("Using accept_encoding: {}".format(accept_encoding)) log_info("Using x_accept_part_encoding: {}".format(x_accept_part_encoding)) cluster = Cluster(config=cluster_config) cluster.reset(sg_config_path=sg_conf) admin = Admin(cluster.sync_gateways[0]) user = admin.register_user(cluster.sync_gateways[0], "db", "seth", "password", channels=["seth"]) doc_body = Data.load("mock_users_20k.json") with concurrent.futures.ThreadPoolExecutor(max_workers=libraries.testkit.settings.MAX_REQUEST_WORKERS) as executor: futures = [executor.submit(user.add_doc, doc_id="test-{}".format(i), content=doc_body) for i in range(num_docs)] for future in concurrent.futures.as_completed(futures): try: log_info(future.result()) except Exception as e: log_info("Failed to push doc: {}".format(e)) docs = [{"id": "test-{}".format(i)} for i in range(num_docs)] payload = {"docs": docs} # Issue curl request and get size of request response_size = issue_request(cluster.sync_gateways[0], user_agent, accept_encoding, x_accept_part_encoding, payload) log_info("Response size: {}".format(response_size)) # Verfiy size matches expected size verify_response_size(user_agent, accept_encoding, x_accept_part_encoding, response_size)
def test_sg_replicate_non_existent_db(params_from_base_test_setup): cluster_config = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] log_info("Running 'test_sg_replicate_non_existent_db'") log_info("Using cluster_config: {}".format(cluster_config)) config = sync_gateway_config_path_for_mode("sync_gateway_sg_replicate", mode) sg1, sg2 = create_sync_gateways( cluster_config=cluster_config, sg_config_path=config ) # delete databases if they exist try: sg1.admin.delete_db(DB1) sg2.admin.delete_db(DB2) except HTTPError: logging.debug("Got HTTPError trying to delete a DB, which means it didn't already exist") # Start a push replication got_exception = False try: sg1.start_push_replication( sg2.admin.admin_url, DB1, DB2, continuous=False, use_remote_source=True, use_admin_url=True ) except HTTPError: got_exception = True assert got_exception is True, 'Expected an exception trying to create a replication against non-existent db'
def test_sg_replicate_basic_test(params_from_base_test_setup):
    """Basic one-shot sg-replicate round trip.

    One doc is written to each of two Sync Gateways; a one-shot push
    (sg1 -> sg2) and a one-shot pull (sg1 <- sg2) are run, and each
    replication's completion stats plus the final doc placement are verified.
    """
    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    log_info("Running 'test_sg_replicate_basic_test'")
    log_info("Using cluster_config: {}".format(cluster_config))

    config = sync_gateway_config_path_for_mode("sync_gateway_sg_replicate", mode)

    sg1, sg2 = create_sync_gateways(
        cluster_config=cluster_config,
        sg_config_path=config
    )

    admin = Admin(sg1)
    admin.admin_url = sg1.url

    sg1_user, sg2_user = create_sg_users(sg1, sg2, DB1, DB2)

    # Add docs to sg1 and sg2
    doc_id_sg1 = sg1_user.add_doc()
    doc_id_sg2 = sg2_user.add_doc()

    # Wait until docs show up in changes feed
    wait_until_doc_in_changes_feed(sg1, DB1, doc_id_sg1)
    wait_until_doc_in_changes_feed(sg2, DB2, doc_id_sg2)

    # Make sure it doesn't appear on the target DB
    # even without starting a replication (which will
    # happen if the SG's are sharing a CB bucket)
    time.sleep(5)
    assert_does_not_have_doc(sg2_user, doc_id_sg1)
    assert_does_not_have_doc(sg1_user, doc_id_sg2)

    # Start a push replication sg1 -> sg2
    # Should block until replication
    # Result should contain the stats of the completed replication
    replication_result = sg1.start_push_replication(
        sg2.admin.admin_url,
        DB1,
        DB2,
        continuous=False,
        use_remote_source=True,
        use_admin_url=True
    )

    logging.debug("replication_result 1: {}".format(replication_result))

    # Exactly one doc (the one added to sg1) should have been transferred
    assert replication_result["continuous"] is False, 'replication_result["continuous"] != False'
    assert replication_result["docs_written"] == 1, 'replication_result["docs_written"] != 1'
    assert replication_result["docs_read"] == 1, 'replication_result["docs_read"] != 1'
    assert replication_result["doc_write_failures"] == 0, 'replication_result["doc_write_failures"] != 0'

    # Start a pull replication sg1 <- sg2
    replication_result = sg1.start_pull_replication(
        sg2.admin.admin_url,
        DB2,
        DB1,
        continuous=False,
        use_remote_target=True,
        use_admin_url=True
    )

    logging.debug("replication_result 2: {}".format(replication_result))

    # Exactly one doc (the one added to sg2) should have come back
    assert replication_result["continuous"] is False, 'replication_result["continuous"] != False'
    assert replication_result["docs_written"] == 1, 'replication_result["docs_written"] != 1'
    assert replication_result["docs_read"] == 1, 'replication_result["docs_read"] != 1'
    assert replication_result["doc_write_failures"] == 0, 'replication_result["doc_write_failures"] != 0'

    # Verify that the doc added to sg1 made it to sg2
    assert_has_doc(sg2_user, doc_id_sg1)

    # Verify that the doc added to sg2 made it to sg1
    assert_has_doc(sg1_user, doc_id_sg2)
def test_sg_replicate_continuous_replication(params_from_base_test_setup): cluster_config = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] log_info("Running 'test_sg_replicate_continuous_replication'") log_info("Using cluster_config: {}".format(cluster_config)) config = sync_gateway_config_path_for_mode("sync_gateway_sg_replicate", mode) sg1, sg2 = create_sync_gateways( cluster_config=cluster_config, sg_config_path=config ) # Create users (in order to add docs) sg1_user, sg2_user = create_sg_users(sg1, sg2, DB1, DB2) # Kick off continuous replication sg1.start_push_replication( sg2.admin.admin_url, DB1, DB2, continuous=True, use_remote_source=True, use_admin_url=True ) # Add docs doc_id = sg1_user.add_doc() # Wait til all docs sync to target wait_until_docs_sync(sg2_user, [doc_id]) # Shutdown target sg2.stop() # Add more docs doc_id_2 = sg1_user.add_doc() # Wait a few seconds to give the source replicator time to have some attempts time.sleep(5) # Restart target config = sync_gateway_config_path_for_mode("sync_gateway_sg_replicate", mode) sg2.start(config=config) # Wait til all docs sync to target wait_until_docs_sync(sg2_user, [doc_id, doc_id_2]) # Stop replications sg1.stop_push_replication( sg2.admin.admin_url, DB1, DB2, continuous=True, use_remote_source=True, use_admin_url=True ) # Wait until active_tasks is empty (or throw exception) wait_until_active_tasks_empty(sg1) # Add more docs, even though the replication is already stopped doc_id_3 = sg1_user.add_doc() # Wait a few seconds to give it time to potentially propagate time.sleep(5) # Make sure the doc did not propagate to the target assert_does_not_have_doc(sg2_user, doc_id_3)