def test_longpoll_changes_sanity(params_from_base_test_setup, sg_conf_name, num_docs, num_revisions):
    """Open a longpoll _changes feed and verify it observes pushed channel docs.

    'seth' longpolls on channels ABC + TERMINATE while 'abc_doc_pusher'
    adds and revises docs in ABC; 'doc_terminator' posts a termination doc
    to close the feed once pushing is done.
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running: 'longpoll_changes_sanity': {}".format(cluster_conf))
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    sg = cluster.sync_gateways[0]
    admin = Admin(sg)
    seth = admin.register_user(target=sg, db="db", name="seth", password="******", channels=["ABC", "TERMINATE"])
    abc_doc_pusher = admin.register_user(target=sg, db="db", name="abc_doc_pusher", password="******", channels=["ABC"])
    doc_terminator = admin.register_user(target=sg, db="db", name="doc_terminator", password="******", channels=["TERMINATE"])

    docs_in_changes = {}

    with concurrent.futures.ThreadPoolExecutor(max_workers=libraries.testkit.settings.MAX_REQUEST_WORKERS) as executor:
        tasks = {
            executor.submit(seth.start_longpoll_changes_tracking, termination_doc_id="killpolling"): "polling",
            executor.submit(abc_doc_pusher.add_docs, num_docs): "doc_pusher",
        }

        for done in concurrent.futures.as_completed(tasks):
            role = tasks[done]
            if role == "doc_pusher":
                # Docs landed: add the revisions, give the changes feed time
                # to catch up, then terminate seth's longpoll tracker.
                abc_doc_pusher.update_docs(num_revs_per_doc=num_revisions)
                time.sleep(5)
                doc_terminator.add_doc("killpolling")
            elif role == "polling":
                docs_in_changes, seq_num = done.result()

    # Verify abc_doc_pusher gets the correct docs in its changes feed;
    # the pusher's own cache is the ground truth for the channel docs.
    verify_changes(abc_doc_pusher, expected_num_docs=num_docs, expected_num_revisions=num_revisions, expected_docs=abc_doc_pusher.cache)

    # Docs seen on seth's longpoll feed must match the pushed docs.
    verify_same_docs(expected_num_docs=num_docs, doc_dict_one=docs_in_changes, doc_dict_two=abc_doc_pusher.cache)
def test_continuous_changes_parametrized(params_from_base_test_setup, sg_conf_name, num_users, num_docs, num_revisions):
    """Fan out num_users continuous _changes subscribers while docs are
    pushed to channel 'ABC', then verify every subscriber saw the docs.
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'continuous_changes_parametrized'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_users: {}".format(num_users))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    sg = cluster.sync_gateways[0]
    admin = Admin(sg)
    users = admin.register_bulk_users(target=sg, db="db", name_prefix="user", number=num_users, password="******", channels=["ABC", "TERMINATE"])
    abc_doc_pusher = admin.register_user(target=sg, db="db", name="abc_doc_pusher", password="******", channels=["ABC"])
    doc_terminator = admin.register_user(target=sg, db="db", name="doc_terminator", password="******", channels=["TERMINATE"])

    with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:
        # One continuous-feed tracker per registered user, keyed by user name,
        # plus one task that pushes the channel docs.
        tasks = {}
        for user in users:
            tasks[executor.submit(user.start_continuous_changes_tracking, termination_doc_id="killcontinuous")] = user.name
        tasks[executor.submit(abc_doc_pusher.add_docs, num_docs)] = "doc_pusher"

        for done in concurrent.futures.as_completed(tasks):
            role = tasks[done]
            if role == "doc_pusher":
                errors = done.result()
                assert len(errors) == 0
                # Docs are in: add the revisions, let the feeds drain, then
                # close every continuous feed by pushing the termination doc.
                abc_doc_pusher.update_docs(num_revs_per_doc=num_revisions)
                time.sleep(10)
                doc_terminator.add_doc("killcontinuous")
            elif role.startswith("user"):
                # This user's continuous feed closed; compare the docs it saw
                # against the pusher's cache of channel docs.
                docs_in_changes = done.result()
                verify_same_docs(expected_num_docs=num_docs, doc_dict_one=docs_in_changes, doc_dict_two=abc_doc_pusher.cache)

    # One-off verification of the pusher's own view of the channel docs.
    verify_changes(abc_doc_pusher, expected_num_docs=num_docs, expected_num_revisions=num_revisions, expected_docs=abc_doc_pusher.cache)
def test_continuous_changes_sanity(params_from_base_test_setup, sg_conf_name, num_docs, num_revisions):
    """Single-subscriber continuous _changes sanity check.

    'seth' tracks a continuous feed while 'abc_doc_pusher' pushes and
    revises docs in channel 'ABC'; 'doc_terminator' closes the feed.
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'continuous_changes_sanity'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    sg = cluster.sync_gateways[0]
    admin = Admin(sg)
    seth = admin.register_user(target=sg, db="db", name="seth", password="******", channels=["ABC", "TERMINATE"])
    abc_doc_pusher = admin.register_user(target=sg, db="db", name="abc_doc_pusher", password="******", channels=["ABC"])
    doc_terminator = admin.register_user(target=sg, db="db", name="doc_terminator", password="******", channels=["TERMINATE"])

    docs_in_changes = {}

    with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:
        tasks = {
            executor.submit(seth.start_continuous_changes_tracking, termination_doc_id="killcontinuous"): "continuous",
            executor.submit(abc_doc_pusher.add_docs, num_docs): "doc_pusher",
        }

        for done in concurrent.futures.as_completed(tasks):
            role = tasks[done]
            if role == "doc_pusher":
                # Docs are in: add revisions, allow the feed to catch up,
                # then push the termination doc to close seth's feed.
                abc_doc_pusher.update_docs(num_revs_per_doc=num_revisions)
                time.sleep(5)
                doc_terminator.add_doc("killcontinuous")
            elif role == "continuous":
                docs_in_changes = done.result()

    # Verify the pusher's own view of the channel docs.
    verify_changes(abc_doc_pusher, expected_num_docs=num_docs, expected_num_revisions=num_revisions, expected_docs=abc_doc_pusher.cache)

    # Docs observed on seth's continuous feed must match the pushed docs.
    verify_same_docs(expected_num_docs=num_docs, doc_dict_one=docs_in_changes, doc_dict_two=abc_doc_pusher.cache)
def test_continuous_changes_parametrized(params_from_base_test_setup, sg_conf_name, num_users, num_docs, num_revisions):
    """Run num_users continuous _changes subscribers while docs are pushed
    to channel 'ABC', then verify each subscriber saw the pushed docs.

    NOTE(review): this redefines a function of the same name that appears
    earlier in this file — the earlier definition is shadowed; confirm
    whether both copies are intended.
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'continuous_changes_parametrized'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_users: {}".format(num_users))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])
    # All bulk users get both channels so they see the channel docs and the
    # termination doc; the pusher/terminator users each get one channel.
    users = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db", name_prefix="user", number=num_users, password="******", channels=["ABC", "TERMINATE"])
    abc_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="abc_doc_pusher", password="******", channels=["ABC"])
    doc_terminator = admin.register_user(target=cluster.sync_gateways[0], db="db", name="doc_terminator", password="******", channels=["TERMINATE"])

    with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:
        # One continuous-feed tracker per user (keyed by user name), plus the
        # doc-pushing task.
        futures = {
            executor.submit(user.start_continuous_changes_tracking, termination_doc_id="killcontinuous"): user.name
            for user in users
        }
        futures[executor.submit(abc_doc_pusher.add_docs, num_docs)] = "doc_pusher"

        for future in concurrent.futures.as_completed(futures):
            task_name = futures[future]
            # Once the docs are pushed, send the termination doc so every
            # continuous changes feed subscriber shuts down.
            if task_name == "doc_pusher":
                errors = future.result()
                assert len(errors) == 0
                abc_doc_pusher.update_docs(num_revs_per_doc=num_revisions)
                # Allow time for the revisions to reach the subscribers
                # before closing their feeds.
                time.sleep(10)
                doc_terminator.add_doc("killcontinuous")
            elif task_name.startswith("user"):
                # When the user's continuous _changes feed has closed, compare
                # the docs it saw against the pusher's cache of channel docs.
                # (expected_num_docs is num_docs only — the termination doc is
                # not expected in the tracked result; verify against tracker
                # behavior if this changes.)
                docs_in_changes = future.result()
                verify_same_docs(expected_num_docs=num_docs, doc_dict_one=docs_in_changes, doc_dict_two=abc_doc_pusher.cache)

    # One-off verification of the pusher's own view of the channel docs.
    verify_changes(abc_doc_pusher, expected_num_docs=num_docs, expected_num_revisions=num_revisions, expected_docs=abc_doc_pusher.cache)
def test_continuous_changes_sanity(params_from_base_test_setup, sg_conf_name, num_docs, num_revisions):
    """Single-subscriber continuous _changes sanity check: 'seth' tracks a
    continuous feed while docs are pushed to channel 'ABC'.

    NOTE(review): this redefines a function of the same name that appears
    earlier in this file — the earlier definition is shadowed; confirm
    whether both copies are intended.
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'continuous_changes_sanity'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])
    seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth", password="******", channels=["ABC", "TERMINATE"])
    abc_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="abc_doc_pusher", password="******", channels=["ABC"])
    doc_terminator = admin.register_user(target=cluster.sync_gateways[0], db="db", name="doc_terminator", password="******", channels=["TERMINATE"])

    docs_in_changes = dict()

    with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:
        futures = dict()
        # Task 1: seth tracks a continuous _changes feed until the
        # termination doc arrives. Task 2: push the channel docs.
        futures[executor.submit(
            seth.start_continuous_changes_tracking,
            termination_doc_id="killcontinuous")] = "continuous"
        futures[executor.submit(abc_doc_pusher.add_docs, num_docs)] = "doc_pusher"

        for future in concurrent.futures.as_completed(futures):
            task_name = futures[future]
            # Send termination doc to seth continuous changes feed subscriber
            if task_name == "doc_pusher":
                abc_doc_pusher.update_docs(num_revs_per_doc=num_revisions)
                # Allow time for the revisions to reach the subscriber before
                # closing the feed.
                time.sleep(5)
                doc_terminator.add_doc("killcontinuous")
            elif task_name == "continuous":
                docs_in_changes = future.result()

    # Verify the pusher's own view of the channel docs (cache is ground truth).
    verify_changes(abc_doc_pusher, expected_num_docs=num_docs, expected_num_revisions=num_revisions, expected_docs=abc_doc_pusher.cache)

    # Docs observed on seth's continuous feed must match the pushed docs.
    # (expected_num_docs is num_docs only — the termination doc is not
    # expected in the tracked result; verify against tracker behavior.)
    verify_same_docs(expected_num_docs=num_docs,
                     doc_dict_one=docs_in_changes,
                     doc_dict_two=abc_doc_pusher.cache)
def test_issue_1524(params_from_base_test_setup, sg_conf_name, num_docs):
    """Regression test for issue 1524.

    A user with no channels opens a longpoll _changes feed, is granted
    access to channel 'A' mid-feed via an access doc processed by the sync
    function, and must then receive all of the 'A' channel docs.
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'issue_1524'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    sg = cluster.sync_gateways[0]
    admin = Admin(sg)
    user_no_channels = admin.register_user(target=sg, db="db", name="user_no_channels", password="******")
    a_doc_pusher = admin.register_user(target=sg, db="db", name="a_doc_pusher", password="******", channels=["A"])
    access_doc_pusher = admin.register_user(target=sg, db="db", name="access_doc_pusher", password="******")
    terminator = admin.register_user(target=sg, db="db", name="terminator", password="******", channels=["A"])

    longpoll_docs = {}

    with concurrent.futures.ThreadPoolExecutor(max_workers=libraries.testkit.settings.MAX_REQUEST_WORKERS) as executor:
        tasks = {
            executor.submit(user_no_channels.start_longpoll_changes_tracking, termination_doc_id="terminator"): "polling",
        }
        log_info("Starting longpoll feed")
        tasks[executor.submit(a_doc_pusher.add_docs, num_docs=num_docs, bulk=True, name_prefix="a-doc")] = "a_docs_pushed"
        log_info("'A' channel docs pushing")

        for done in concurrent.futures.as_completed(tasks):
            role = tasks[done]
            if role == "a_docs_pushed":
                log_info("'A' channel docs pushed")
                time.sleep(5)
                # The access doc triggers the sync function to grant
                # 'user_no_channels' access to channel 'A' mid-feed.
                log_info("Grant 'user_no_channels' access to channel 'A' via sync function")
                access_doc_pusher.add_doc(doc_id="access_doc", content={"accessUser": "******", "accessChannels": ["A"]})
                time.sleep(5)
                log_info("'terminator' pushing termination doc")
                terminator.add_doc(doc_id="terminator")
            if role == "polling":
                log_info("Getting changes from longpoll")
                longpoll_docs, last_seq = done.result()
                log_info("Verify docs in longpoll changes are the expected docs")

    log_info("Verifying 'user_no_channels' has same docs as 'a_doc_pusher' + access_doc")

    # One off changes verification will include the termination doc
    expected_docs = {}
    for cache in (a_doc_pusher.cache, terminator.cache):
        expected_docs.update(cache)
    verify_changes(user_no_channels, expected_num_docs=num_docs + 1, expected_num_revisions=0, expected_docs=expected_docs)

    # TODO: Fix this inconsistency suite wide
    # Longpoll docs do not save termination doc
    log_info("Verify docs in longpoll changes are the expected docs")
    verify_same_docs(num_docs, longpoll_docs, a_doc_pusher.cache)
def test_issue_1524(params_from_base_test_setup, sg_conf_name, num_docs):
    """Regression test for issue 1524: a channel grant issued while a user's
    longpoll _changes feed is open must deliver the newly-visible docs.

    NOTE(review): this redefines a function of the same name that appears
    earlier in this file — the earlier definition is shadowed; confirm
    whether both copies are intended.
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'issue_1524'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])
    # 'user_no_channels' starts with no channel access; the access doc below
    # grants it channel 'A' mid-feed.
    user_no_channels = admin.register_user(target=cluster.sync_gateways[0], db="db", name="user_no_channels", password="******")
    a_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="a_doc_pusher", password="******", channels=["A"])
    access_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="access_doc_pusher", password="******")
    terminator = admin.register_user(target=cluster.sync_gateways[0], db="db", name="terminator", password="******", channels=["A"])

    longpoll_docs = {}

    with concurrent.futures.ThreadPoolExecutor(max_workers=libraries.testkit.settings.MAX_REQUEST_WORKERS) as executor:
        futures = dict()
        futures[executor.submit(user_no_channels.start_longpoll_changes_tracking, termination_doc_id="terminator")] = "polling"
        log_info("Starting longpoll feed")
        futures[executor.submit(a_doc_pusher.add_docs, num_docs=num_docs, bulk=True, name_prefix="a-doc")] = "a_docs_pushed"
        log_info("'A' channel docs pushing")

        for future in concurrent.futures.as_completed(futures):
            task_name = futures[future]
            if task_name == "a_docs_pushed":
                log_info("'A' channel docs pushed")
                time.sleep(5)
                # The access doc is processed by the sync function, which
                # grants 'user_no_channels' access to channel 'A' while the
                # longpoll feed is still open.
                log_info("Grant 'user_no_channels' access to channel 'A' via sync function")
                access_doc_pusher.add_doc(
                    doc_id="access_doc",
                    content={
                        "accessUser": "******",
                        "accessChannels": ["A"]
                    }
                )
                time.sleep(5)
                log_info("'terminator' pushing termination doc")
                terminator.add_doc(doc_id="terminator")
            if task_name == "polling":
                log_info("Getting changes from longpoll")
                longpoll_docs, last_seq = future.result()
                log_info("Verify docs in longpoll changes are the expected docs")

    log_info("Verifying 'user_no_channels' has same docs as 'a_doc_pusher' + access_doc")

    # One off changes verification will include the termination doc
    expected_docs = {k: v for cache in [a_doc_pusher.cache, terminator.cache] for k, v in cache.items()}
    verify_changes(user_no_channels, expected_num_docs=num_docs + 1, expected_num_revisions=0, expected_docs=expected_docs)

    # TODO: Fix this inconsistency suite wide
    # Longpoll docs do not save termination doc
    log_info("Verify docs in longpoll changes are the expected docs")
    verify_same_docs(num_docs, longpoll_docs, a_doc_pusher.cache)