def test_muliple_users_single_channel(params_from_base_test_setup, sg_conf_name):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'muliple_users_single_channel'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    sgs = cluster.sync_gateways

    num_docs_seth = 1000
    num_docs_adam = 2000
    num_docs_traun = 3000

    admin = Admin(sgs[0])

    seth = admin.register_user(target=sgs[0], db="db", name="seth", password="******", channels=["ABC"])
    adam = admin.register_user(target=sgs[0], db="db", name="adam", password="******", channels=["ABC"])
    traun = admin.register_user(target=sgs[0], db="db", name="traun", password="******", channels=["ABC"])

    seth.add_docs(num_docs_seth)  # ABC
    adam.add_docs(num_docs_adam, bulk=True)  # ABC
    traun.add_docs(num_docs_traun, bulk=True)  # ABC

    assert len(seth.cache) == num_docs_seth
    assert len(adam.cache) == num_docs_adam
    assert len(traun.cache) == num_docs_traun

    # discuss appropriate time with team
    time.sleep(10)

    # Each user should get all docs from all users
    all_caches = [seth.cache, adam.cache, traun.cache]
    all_docs = {k: v for cache in all_caches for k, v in cache.items()}

    verify_changes([seth, adam, traun],
                   expected_num_docs=num_docs_seth + num_docs_adam + num_docs_traun,
                   expected_num_revisions=0,
                   expected_docs=all_docs)
def test_seq(params_from_base_test_setup, sg_conf_name, num_users, num_docs, num_revisions):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running seq")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_users: {}".format(num_users))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])

    # all users will share docs due to having the same channel
    users = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db", name_prefix="user", number=num_users, password="******", channels=["ABC"])

    for user in users:
        user.add_docs(num_docs, bulk=True)

    for user in users:
        user.update_docs(num_revisions)

    time.sleep(5)

    user_0_changes = users[0].get_changes(since=0)
    doc_seq = user_0_changes["results"][num_docs // 2]["seq"]

    # https://github.com/couchbase/sync_gateway/issues/1475#issuecomment-172426052
    # verify you can issue _changes with since=12313-0::1023.15
    for user in users:
        changes = user.get_changes(since=doc_seq)
        log_info("Trying changes with since={}".format(doc_seq))
        assert len(changes["results"]) > 0

        second_to_last_doc_entry_seq = changes["results"][-2]["seq"]
        last_doc_entry_seq = changes["results"][-1]["seq"]

        log_info('Second to last doc "seq": {}'.format(second_to_last_doc_entry_seq))
        log_info('Last doc "seq": {}'.format(last_doc_entry_seq))

        if mode == "di":
            # Verify the last "seq" follows the format 12313-0, not 12313-0::1023.15
            log_info('Verify that the last "seq" is a plain hashed value')
            assert len(second_to_last_doc_entry_seq.split("::")) == 2
            assert len(last_doc_entry_seq.split("::")) == 1
        elif mode == "cc":
            assert second_to_last_doc_entry_seq > 0
            assert last_doc_entry_seq > 0
        else:
            raise ValueError("Unsupported 'mode' !!")

    all_doc_caches = [user.cache for user in users]
    all_docs = {k: v for cache in all_doc_caches for k, v in cache.items()}
    verify_changes(users, expected_num_docs=num_users * num_docs, expected_num_revisions=num_revisions, expected_docs=all_docs)
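# The "di" assertions above only check the shape of a distributed-index sequence
# string such as "12313-0::1023.15" (a hashed base, optionally followed by
# "::<low sequence>"). A minimal sketch of a helper that makes that split explicit;
# the name parse_di_seq and its return shape are assumptions, not part of the suite.
def parse_di_seq(seq):
    """Split a di-mode seq value into (hashed_base, low_seq_or_None)."""
    parts = seq.split("::")
    if len(parts) == 2:
        # e.g. "12313-0::1023.15" -> ("12313-0", "1023.15")
        return parts[0], parts[1]
    # e.g. "12313-0" -> ("12313-0", None)
    return parts[0], None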
def test_dcp_reshard_single_sg_accel_goes_down_and_up(params_from_base_test_setup, sg_conf):

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_dcp_reshard_single_sg_accel_goes_down_and_up'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # Stop the second sg_accel
    stop_status = cluster.sg_accels[1].stop()
    assert stop_status == 0, "Failed to stop sg_accel"

    admin = Admin(cluster.sync_gateways[0])

    traun = admin.register_user(target=cluster.sync_gateways[0], db="db", name="traun", password="******", channels=["ABC", "NBC", "CBS"])
    seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth", password="******", channels=["FOX"])

    log_info(">> Users added")

    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:

        futures = dict()

        log_info(">>> Adding Seth docs")  # FOX
        futures[executor.submit(seth.add_docs, 8000)] = "seth"

        log_info(">>> Adding Traun docs")  # ABC, NBC, CBS
        futures[executor.submit(traun.add_docs, 10000, bulk=True)] = "traun"

        # Take down the remaining sg_accel
        shutdown_status = cluster.sg_accels[0].stop()
        assert shutdown_status == 0

        # Add more docs while no writers are online
        log_info(">>> Adding Seth docs")  # FOX
        futures[executor.submit(seth.add_docs, 2000, bulk=True)] = "seth"

        # Start a single writer
        start_status = cluster.sg_accels[0].start(sg_conf)
        assert start_status == 0

        for future in concurrent.futures.as_completed(futures):
            tag = futures[future]
            log_info("{} Completed:".format(tag))

    # TODO better way to do this
    time.sleep(120)

    verify_changes(traun, expected_num_docs=10000, expected_num_revisions=0, expected_docs=traun.cache)
    verify_changes(seth, expected_num_docs=10000, expected_num_revisions=0, expected_docs=seth.cache)

    # Start the second writer again
    start_status = cluster.sg_accels[1].start(sg_conf)
    assert start_status == 0
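# The fixed time.sleep(120) above is flagged with a TODO. One option is to poll
# until verify_changes stops failing instead of sleeping for a fixed interval.
# A minimal sketch, assuming verify_changes raises AssertionError while the
# changes feed is still catching up; wait_until_changes_verified is a
# hypothetical helper, not part of the test suite.
def wait_until_changes_verified(user, expected_num_docs, expected_num_revisions,
                                expected_docs, timeout=300, interval=5):
    start_time = time.time()
    while True:
        try:
            verify_changes(user, expected_num_docs=expected_num_docs,
                           expected_num_revisions=expected_num_revisions,
                           expected_docs=expected_docs)
            return
        except AssertionError:
            # Changes feed may still be catching up; back off and retry until timeout
            if time.time() - start_time > timeout:
                raise
            time.sleep(interval)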
def test_single_user_single_channel_doc_updates(params_from_base_test_setup, sg_conf_name, num_docs, num_revisions):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log.info("Running 'single_user_single_channel_doc_updates'")
    log.info("cluster_conf: {}".format(cluster_conf))
    log.info("sg_conf: {}".format(sg_conf))
    log.info("num_docs: {}".format(num_docs))
    log.info("num_revisions: {}".format(num_revisions))

    start = time.time()

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    username = "******"
    password = "******"
    channels = ["channel-1"]

    sgs = cluster.sync_gateways

    admin = Admin(sgs[0])

    single_user = admin.register_user(target=sgs[0], db="db", name=username, password=password, channels=channels)

    # Not using bulk docs
    single_user.add_docs(num_docs, name_prefix="test-")

    assert len(single_user.cache) == num_docs

    # let SG catch up with all the changes
    time.sleep(5)

    single_user.update_docs(num_revisions)

    time.sleep(10)

    verify_changes([single_user], expected_num_docs=num_docs, expected_num_revisions=num_revisions, expected_docs=single_user.cache)

    end = time.time()
    log.info("TIME:{}s".format(end - start))
def test_sync_sanity_backfill(params_from_base_test_setup, sg_conf_name):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'sync_sanity_backfill'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    radio_stations = ["KMOW", "HWOD", "KDWB"]
    number_of_docs_per_pusher = 5000

    admin = Admin(cluster.sync_gateways[0])

    dj_0 = admin.register_user(target=cluster.sync_gateways[0], db="db", name="dj_0", password="******")

    kdwb_caches = []
    for radio_station in radio_stations:
        doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="{}_doc_pusher".format(radio_station), password="******", channels=[radio_station])
        doc_pusher.add_docs(number_of_docs_per_pusher, bulk=True)
        if doc_pusher.name == "KDWB_doc_pusher":
            kdwb_caches.append(doc_pusher.cache)

    access_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="access_doc_pusher", password="******")

    # Grant dj_0 access to KDWB channel via sync after docs are pushed
    access_doc_pusher.add_doc("access_doc", content="access")

    # Build global doc_id, rev dict for all docs from all KDWB caches
    kdwb_docs = {k: v for cache in kdwb_caches for k, v in cache.items()}

    # wait for changes
    time.sleep(5)

    verify_changes(dj_0, expected_num_docs=number_of_docs_per_pusher, expected_num_revisions=0, expected_docs=kdwb_docs)
def test_dcp_reshard_sync_gateway_goes_down(params_from_base_test_setup, sg_conf):

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_dcp_reshard_sync_gateway_goes_down'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    mode = cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])

    traun = admin.register_user(target=cluster.sync_gateways[0], db="db", name="traun", password="******", channels=["ABC", "NBC", "CBS"])
    seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth", password="******", channels=["FOX"])

    log_info(">> Users added")

    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:

        futures = dict()

        log_info(">>> Adding Seth docs")  # FOX
        futures[executor.submit(seth.add_docs, 8000)] = "seth"

        log_info(">>> Adding Traun docs")  # ABC, NBC, CBS
        futures[executor.submit(traun.add_docs, 2000, bulk=True)] = "traun"

        # stop sg_accel
        shutdown_status = cluster.sg_accels[0].stop()
        assert shutdown_status == 0

        for future in concurrent.futures.as_completed(futures):
            tag = futures[future]
            log_info("{} Completed:".format(tag))

    # TODO better way to do this
    time.sleep(120)

    verify_changes(traun, expected_num_docs=2000, expected_num_revisions=0, expected_docs=traun.cache)
    verify_changes(seth, expected_num_docs=8000, expected_num_revisions=0, expected_docs=seth.cache)

    # Verify that the stopped sg_accel ("ac1") is down but the other nodes are still running
    errors = cluster.verify_alive(mode)
    assert len(errors) == 1 and errors[0][0].hostname == "ac1"

    # Restart the failed node so that cluster verification does not blow up in test teardown
    start_status = cluster.sg_accels[0].start(sg_conf)
    assert start_status == 0
def test_continuous_changes_parametrized(params_from_base_test_setup, sg_conf_name, num_users, num_docs, num_revisions):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'continuous_changes_parametrized'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_users: {}".format(num_users))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])
    users = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db", name_prefix="user", number=num_users, password="******", channels=["ABC", "TERMINATE"])
    abc_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="abc_doc_pusher", password="******", channels=["ABC"])
    doc_terminator = admin.register_user(target=cluster.sync_gateways[0], db="db", name="doc_terminator", password="******", channels=["TERMINATE"])

    with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:

        futures = {executor.submit(user.start_continuous_changes_tracking, termination_doc_id="killcontinuous"): user.name for user in users}
        futures[executor.submit(abc_doc_pusher.add_docs, num_docs)] = "doc_pusher"

        for future in concurrent.futures.as_completed(futures):
            task_name = futures[future]

            # Send termination doc to the continuous changes feed subscribers
            if task_name == "doc_pusher":
                errors = future.result()
                assert len(errors) == 0
                abc_doc_pusher.update_docs(num_revs_per_doc=num_revisions)

                time.sleep(10)

                doc_terminator.add_doc("killcontinuous")
            elif task_name.startswith("user"):
                # When the user's continuous _changes feed closes, return the docs and verify the user got all the channel docs
                docs_in_changes = future.result()

                # Expect number of docs + the termination doc + _user doc
                verify_same_docs(expected_num_docs=num_docs, doc_dict_one=docs_in_changes, doc_dict_two=abc_doc_pusher.cache)

    # Expect number of docs + the termination doc
    verify_changes(abc_doc_pusher, expected_num_docs=num_docs, expected_num_revisions=num_revisions, expected_docs=abc_doc_pusher.cache)
def test_longpoll_changes_sanity(params_from_base_test_setup, sg_conf_name, num_docs, num_revisions):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running: 'longpoll_changes_sanity': {}".format(cluster_conf))
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])
    seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth", password="******", channels=["ABC", "TERMINATE"])
    abc_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="abc_doc_pusher", password="******", channels=["ABC"])
    doc_terminator = admin.register_user(target=cluster.sync_gateways[0], db="db", name="doc_terminator", password="******", channels=["TERMINATE"])

    docs_in_changes = dict()

    with concurrent.futures.ThreadPoolExecutor(max_workers=libraries.testkit.settings.MAX_REQUEST_WORKERS) as executor:

        futures = dict()
        futures[executor.submit(seth.start_longpoll_changes_tracking, termination_doc_id="killpolling")] = "polling"
        futures[executor.submit(abc_doc_pusher.add_docs, num_docs)] = "doc_pusher"

        for future in concurrent.futures.as_completed(futures):
            task_name = futures[future]

            # Send termination doc to seth's long poller
            if task_name == "doc_pusher":
                abc_doc_pusher.update_docs(num_revs_per_doc=num_revisions)

                # Allow time for changes to reach subscribers
                time.sleep(5)

                doc_terminator.add_doc("killpolling")
            elif task_name == "polling":
                docs_in_changes, seq_num = future.result()

    # Verify abc_doc_pusher gets the correct docs in the changes feed
    verify_changes(abc_doc_pusher, expected_num_docs=num_docs, expected_num_revisions=num_revisions, expected_docs=abc_doc_pusher.cache)

    # Verify docs from seth's longpoll changes are the same as abc_doc_pusher's docs
    verify_same_docs(expected_num_docs=num_docs, doc_dict_one=docs_in_changes, doc_dict_two=abc_doc_pusher.cache)
def test_continuous_changes_sanity(params_from_base_test_setup, sg_conf_name, num_docs, num_revisions):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'continuous_changes_sanity'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])
    seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth", password="******", channels=["ABC", "TERMINATE"])
    abc_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="abc_doc_pusher", password="******", channels=["ABC"])
    doc_terminator = admin.register_user(target=cluster.sync_gateways[0], db="db", name="doc_terminator", password="******", channels=["TERMINATE"])

    docs_in_changes = dict()

    with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:

        futures = dict()
        futures[executor.submit(seth.start_continuous_changes_tracking, termination_doc_id="killcontinuous")] = "continuous"
        futures[executor.submit(abc_doc_pusher.add_docs, num_docs)] = "doc_pusher"

        for future in concurrent.futures.as_completed(futures):
            task_name = futures[future]

            # Send termination doc to seth's continuous changes feed subscriber
            if task_name == "doc_pusher":
                abc_doc_pusher.update_docs(num_revs_per_doc=num_revisions)

                time.sleep(5)

                doc_terminator.add_doc("killcontinuous")
            elif task_name == "continuous":
                docs_in_changes = future.result()

    # Expect number of docs + the termination doc
    verify_changes(abc_doc_pusher, expected_num_docs=num_docs, expected_num_revisions=num_revisions, expected_docs=abc_doc_pusher.cache)

    # Expect number of docs + the termination doc + _user doc
    verify_same_docs(expected_num_docs=num_docs, doc_dict_one=docs_in_changes, doc_dict_two=abc_doc_pusher.cache)
def test_multiple_db_single_data_bucket_single_index_bucket(params_from_base_test_setup, sg_conf_name, num_users, num_docs_per_user):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'multiple_db_single_data_bucket_single_index_bucket'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_users: {}".format(num_users))
    log_info("Using num_docs_per_user: {}".format(num_docs_per_user))

    # 2 dbs share the same data and index bucket
    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    num_db_users = num_users
    num_db2_users = num_users

    admin = Admin(cluster.sync_gateways[0])

    db_one_users = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db", name_prefix="bulk_db_user", number=num_db_users, password="******", channels=["ABC"])
    db_two_users = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db2", name_prefix="bulk_db2_user", number=num_db2_users, password="******", channels=["ABC"])

    all_users = list(db_one_users)
    all_users.extend(db_two_users)
    assert len(all_users) == num_db_users + num_db2_users

    # Round robin
    num_sgs = len(cluster.sync_gateways)
    count = 1

    for user in all_users:
        user.add_docs(num_docs_per_user, bulk=True)
        user.target = cluster.sync_gateways[(count + 1) % num_sgs]
        count += 1

    time.sleep(10)

    # Get list of all docs from the users' caches
    cached_docs_from_all_users = {k: v for user in all_users for k, v in user.cache.items()}

    # Verify each user has all of the docs
    verify_changes(all_users, expected_num_docs=(num_users * 2) * num_docs_per_user, expected_num_revisions=0, expected_docs=cached_docs_from_all_users)
def test_dcp_reshard_sync_gateway_comes_up(params_from_base_test_setup, sg_conf):

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_dcp_reshard_sync_gateway_comes_up'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    stop_status = cluster.sg_accels[0].stop()
    assert stop_status == 0, "Failed to stop sg_accel"

    admin = Admin(cluster.sync_gateways[0])

    traun = admin.register_user(target=cluster.sync_gateways[0], db="db", name="traun", password="******", channels=["ABC", "NBC", "CBS"])
    seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth", password="******", channels=["FOX"])

    log_info(">> Users added")

    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:

        futures = dict()

        time.sleep(5)

        log_info(">>> Adding Traun docs")  # ABC, NBC, CBS
        futures[executor.submit(traun.add_docs, 6000)] = "traun"

        log_info(">>> Adding Seth docs")  # FOX
        futures[executor.submit(seth.add_docs, 4000)] = "seth"

        # Bring the sg_accel back up
        up_status = cluster.sg_accels[0].start(sg_conf)
        assert up_status == 0

        for future in concurrent.futures.as_completed(futures):
            tag = futures[future]
            log_info("{} Completed:".format(tag))

    # TODO better way to do this
    time.sleep(60)

    verify_changes(traun, expected_num_docs=6000, expected_num_revisions=0, expected_docs=traun.cache)
    verify_changes(seth, expected_num_docs=4000, expected_num_revisions=0, expected_docs=seth.cache)
def test_single_user_multiple_channels(params_from_base_test_setup, sg_conf_name):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'single_user_multiple_channels'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    start = time.time()

    sgs = cluster.sync_gateways

    admin = Admin(sgs[0])

    seth = admin.register_user(target=sgs[0], db="db", name="seth", password="******", channels=["ABC", "CBS", "NBC", "FOX"])

    # Round robin
    count = 1
    num_sgs = len(cluster.sync_gateways)
    while count <= 5:
        seth.add_docs(1000, bulk=True)
        seth.target = cluster.sync_gateways[count % num_sgs]
        count += 1

    log_info(seth)

    time.sleep(10)

    verify_changes(users=[seth], expected_num_docs=5000, expected_num_revisions=0, expected_docs=seth.cache)

    end = time.time()
    log_info("TIME:{}s".format(end - start))
def test_sync_access_sanity(params_from_base_test_setup, sg_conf_name):

    num_docs = 100

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'sync_access_sanity'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])

    seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth", password="******")

    # Push some ABC docs
    abc_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="abc_doc_pusher", password="******", channels=["ABC"])
    abc_doc_pusher.add_docs(num_docs)

    # Create an access doc pusher and grant Seth access to the ABC channel
    access_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="access_doc_pusher", password="******", channels=["access"])
    access_doc_pusher.add_doc(doc_id="access_doc", content={"grant_access": "true"})

    # Allow docs to backfill
    time.sleep(5)

    verify_changes(seth, expected_num_docs=num_docs, expected_num_revisions=0, expected_docs=abc_doc_pusher.cache)

    # Remove seth from ABC
    access_doc_pusher.update_doc(doc_id="access_doc", content={"grant_access": "false"})

    # Push more ABC docs
    abc_doc_pusher.add_docs(num_docs)

    time.sleep(10)

    # Verify seth sees no ABC docs
    verify_changes(seth, expected_num_docs=0, expected_num_revisions=0, expected_docs={})
def test_single_user_single_channel(params_from_base_test_setup, sg_conf_name):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'single_user_single_channel'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    sgs = cluster.sync_gateways

    num_seth_docs = 7000
    num_cbs_docs = 3000

    admin = Admin(sgs[0])

    seth = admin.register_user(target=sgs[0], db="db", name="seth", password="******", channels=["ABC"])
    cbs_user = admin.register_user(target=sgs[0], db="db", name="cbs_user", password="******", channels=["CBS"])
    admin_user = admin.register_user(target=sgs[0], db="db", name="admin", password="******", channels=["ABC", "CBS"])

    seth.add_docs(num_seth_docs)
    cbs_user.add_docs(num_cbs_docs)

    assert len(seth.cache) == num_seth_docs
    assert len(cbs_user.cache) == num_cbs_docs
    assert len(admin_user.cache) == 0

    time.sleep(10)

    verify_changes([seth], expected_num_docs=num_seth_docs, expected_num_revisions=0, expected_docs=seth.cache)
    verify_changes([cbs_user], expected_num_docs=num_cbs_docs, expected_num_revisions=0, expected_docs=cbs_user.cache)

    all_doc_caches = [seth.cache, cbs_user.cache]
    all_docs = {k: v for cache in all_doc_caches for k, v in cache.items()}
    verify_changes([admin_user], expected_num_docs=num_cbs_docs + num_seth_docs, expected_num_revisions=0, expected_docs=all_docs)
def test_sync_channel_sanity(params_from_base_test_setup, sg_conf_name):

    num_docs_per_channel = 100
    channels = ["ABC", "NBC", "CBS"]

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'sync_channel_sanity'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])

    doc_pushers = []
    doc_pusher_caches = []

    # Push some docs to each channel
    for channel in channels:
        doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="{}_doc_pusher".format(channel), password="******", channels=[channel])
        doc_pusher.add_docs(num_docs_per_channel, bulk=True)

        doc_pushers.append(doc_pusher)
        doc_pusher_caches.append(doc_pusher.cache)

    # Verify that none of the doc_pushers get docs. They should all be redirected by the sync function
    verify_changes(doc_pushers, expected_num_docs=0, expected_num_revisions=0, expected_docs={})

    subscriber = admin.register_user(target=cluster.sync_gateways[0], db="db", name="subscriber", password="******", channels=["tv_station_channel"])

    # Allow docs to backfill
    time.sleep(20)

    # subscriber should receive all docs
    all_docs = {k: v for cache in doc_pusher_caches for k, v in cache.items()}
    verify_changes(subscriber, expected_num_docs=len(channels) * num_docs_per_channel, expected_num_revisions=0, expected_docs=all_docs)

    # update the subscriber's cache so the user knows which docs to update
    subscriber.cache = all_docs
    subscriber.update_docs(num_revs_per_doc=1)

    # Allow docs to backfill
    time.sleep(20)

    # Verify the docs are back in their respective ABC, NBC, CBS channels
    # HACK: Ignoring rev_id verification because the docs were updated by the subscriber user and not the doc_pusher
    for doc_pusher in doc_pushers:
        verify_changes(doc_pusher, expected_num_docs=num_docs_per_channel, expected_num_revisions=1, expected_docs=doc_pusher.cache, ignore_rev_ids=True)

    # Verify that all docs have been flagged with _removed = true in the changes feed for the subscriber
    verify_docs_removed(subscriber, expected_num_docs=len(all_docs.items()), expected_docs=all_docs)
def test_multiple_users_multiple_channels(params_from_base_test_setup, sg_conf_name):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'multiple_users_multiple_channels'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    num_docs_seth = 1000
    num_docs_adam = 2000
    num_docs_traun = 3000

    sgs = cluster.sync_gateways

    admin = Admin(sgs[0])

    seth = admin.register_user(target=sgs[0], db="db", name="seth", password="******", channels=["ABC"])
    adam = admin.register_user(target=sgs[0], db="db", name="adam", password="******", channels=["NBC", "CBS"])
    traun = admin.register_user(target=sgs[0], db="db", name="traun", password="******", channels=["ABC", "NBC", "CBS"])

    # TODO use bulk docs
    seth.add_docs(num_docs_seth)  # ABC
    adam.add_docs(num_docs_adam)  # NBC, CBS
    traun.add_docs(num_docs_traun)  # ABC, NBC, CBS

    assert len(seth.cache) == num_docs_seth
    assert len(adam.cache) == num_docs_adam
    assert len(traun.cache) == num_docs_traun

    # discuss appropriate time with team
    time.sleep(10)

    # Seth should get docs from seth + traun
    seth_subset = [seth.cache, traun.cache]
    seth_expected_docs = {k: v for cache in seth_subset for k, v in cache.items()}
    verify_changes([seth], expected_num_docs=num_docs_seth + num_docs_traun, expected_num_revisions=0, expected_docs=seth_expected_docs)

    # Adam should get docs from adam + traun
    adam_subset = [adam.cache, traun.cache]
    adam_expected_docs = {k: v for cache in adam_subset for k, v in cache.items()}
    verify_changes([adam], expected_num_docs=num_docs_adam + num_docs_traun, expected_num_revisions=0, expected_docs=adam_expected_docs)

    # Traun should get docs from seth + adam + traun
    traun_subset = [seth.cache, adam.cache, traun.cache]
    traun_expected_docs = {k: v for cache in traun_subset for k, v in cache.items()}
    verify_changes([traun], expected_num_docs=num_docs_seth + num_docs_adam + num_docs_traun, expected_num_revisions=0, expected_docs=traun_expected_docs)
def test_sync_require_roles(params_from_base_test_setup, sg_conf_name):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'sync_require_roles'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    radio_stations = ["KMOW", "HWOD", "KDWB"]
    tv_stations = ["ABC", "CBS", "NBC"]

    number_of_djs = 10
    number_of_vjs = 10

    number_of_docs_per_pusher = 100

    admin = Admin(cluster.sync_gateways[0])

    admin.create_role("db", name="radio_stations", channels=radio_stations)
    admin.create_role("db", name="tv_stations", channels=tv_stations)

    djs = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db", name_prefix="dj", number=number_of_djs, password="******", roles=["radio_stations"])
    vjs = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db", name_prefix="vj", number=number_of_vjs, password="******", roles=["tv_stations"])

    mogul = admin.register_user(target=cluster.sync_gateways[0], db="db", name="mogul", password="******", roles=["tv_stations", "radio_stations"])

    radio_doc_caches = []
    for radio_station in radio_stations:
        doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="{}_doc_pusher".format(radio_station), password="******", channels=[radio_station], roles=["radio_stations"])
        doc_pusher.add_docs(number_of_docs_per_pusher, bulk=True)
        radio_doc_caches.append(doc_pusher.cache)

    expected_num_radio_docs = len(radio_stations) * number_of_docs_per_pusher

    # All docs that have been pushed with the "radio_stations" role
    all_radio_docs = {k: v for cache in radio_doc_caches for k, v in cache.items()}

    tv_doc_caches = []
    for tv_station in tv_stations:
        doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="{}_doc_pusher".format(tv_station), password="******", channels=[tv_station], roles=["tv_stations"])
        doc_pusher.add_docs(number_of_docs_per_pusher, bulk=True)
        tv_doc_caches.append(doc_pusher.cache)

    expected_num_tv_docs = len(tv_stations) * number_of_docs_per_pusher

    # All docs that have been pushed with the "tv_stations" role
    all_tv_docs = {k: v for cache in tv_doc_caches for k, v in cache.items()}

    # Read only users
    radio_channels_no_roles_user = admin.register_user(target=cluster.sync_gateways[0], db="db", name="bad_radio_user", password="******", channels=radio_stations)
    tv_channel_no_roles_user = admin.register_user(target=cluster.sync_gateways[0], db="db", name="bad_tv_user", password="******", channels=tv_stations)

    # Should not be allowed
    radio_channels_no_roles_user.add_docs(13, name_prefix="bad_doc")
    tv_channel_no_roles_user.add_docs(26, name_prefix="bad_doc")

    read_only_user_caches = [radio_channels_no_roles_user.cache, tv_channel_no_roles_user.cache]
    read_only_user_docs = {k: v for cache in read_only_user_caches for k, v in cache.items()}

    # Dictionary should be empty if they were blocked from pushing docs
    assert len(read_only_user_docs.items()) == 0

    # This appears to be non-deterministic: sometimes the changes call returns only some of the documents.
    # There is currently no retry loop in verify_changes, and the bulk_docs requests are likely still being processed.
    time.sleep(5)

    # Should receive docs from the radio station channels
    verify_changes(radio_channels_no_roles_user, expected_num_docs=expected_num_radio_docs, expected_num_revisions=0, expected_docs=all_radio_docs)

    # Should receive docs from the tv station channels
    verify_changes(tv_channel_no_roles_user, expected_num_docs=expected_num_tv_docs, expected_num_revisions=0, expected_docs=all_tv_docs)

    # Verify all djs with the 'radio_stations' role get the docs with radio station channels
    verify_changes(djs, expected_num_docs=expected_num_radio_docs, expected_num_revisions=0, expected_docs=all_radio_docs)

    # Verify all vjs with the 'tv_stations' role get the docs with tv station channels
    verify_changes(vjs, expected_num_docs=expected_num_tv_docs, expected_num_revisions=0, expected_docs=all_tv_docs)

    # Verify mogul gets docs for all the channels associated with the radio_stations + tv_stations roles
    all_doc_caches = list(radio_doc_caches)
    all_doc_caches.extend(tv_doc_caches)
    all_docs = {k: v for cache in all_doc_caches for k, v in cache.items()}

    for k, v in all_docs.items():
        assert not k.startswith("bad_doc")

    verify_changes(mogul, expected_num_docs=expected_num_radio_docs + expected_num_tv_docs, expected_num_revisions=0, expected_docs=all_docs)
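# The comment above notes that verify_changes has no retry loop, so fixed sleeps
# are used while the bulk_docs requests finish processing. A minimal sketch of a
# retry wrapper around verify_changes, assuming it raises AssertionError on a
# mismatch; verify_changes_with_retry is a hypothetical helper, not part of the suite.
def verify_changes_with_retry(users, expected_num_docs, expected_num_revisions,
                              expected_docs, attempts=10, delay=2):
    for attempt in range(attempts):
        try:
            verify_changes(users, expected_num_docs=expected_num_docs,
                           expected_num_revisions=expected_num_revisions,
                           expected_docs=expected_docs)
            return
        except AssertionError:
            # Changes feed may still be catching up; back off and try again
            if attempt == attempts - 1:
                raise
            time.sleep(delay)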
def test_issue_1524(params_from_base_test_setup, sg_conf_name, num_docs):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'issue_1524'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])
    user_no_channels = admin.register_user(target=cluster.sync_gateways[0], db="db", name="user_no_channels", password="******")
    a_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="a_doc_pusher", password="******", channels=["A"])
    access_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="access_doc_pusher", password="******")
    terminator = admin.register_user(target=cluster.sync_gateways[0], db="db", name="terminator", password="******", channels=["A"])

    longpoll_docs = {}

    with concurrent.futures.ThreadPoolExecutor(max_workers=libraries.testkit.settings.MAX_REQUEST_WORKERS) as executor:
        futures = dict()
        futures[executor.submit(user_no_channels.start_longpoll_changes_tracking, termination_doc_id="terminator")] = "polling"
        log_info("Starting longpoll feed")

        futures[executor.submit(a_doc_pusher.add_docs, num_docs=num_docs, bulk=True, name_prefix="a-doc")] = "a_docs_pushed"
        log_info("'A' channel docs pushing")

        for future in concurrent.futures.as_completed(futures):
            task_name = futures[future]

            if task_name == "a_docs_pushed":
                log_info("'A' channel docs pushed")
                time.sleep(5)

                log_info("Grant 'user_no_channels' access to channel 'A' via sync function")
                access_doc_pusher.add_doc(
                    doc_id="access_doc",
                    content={
                        "accessUser": "******",
                        "accessChannels": ["A"]
                    }
                )

                time.sleep(5)

                log_info("'terminator' pushing termination doc")
                terminator.add_doc(doc_id="terminator")

            if task_name == "polling":
                log_info("Getting changes from longpoll")
                longpoll_docs, last_seq = future.result()
                log_info("Verify docs in longpoll changes are the expected docs")

    log_info("Verifying 'user_no_channels' has same docs as 'a_doc_pusher' + access_doc")

    # One off changes verification will include the termination doc
    expected_docs = {k: v for cache in [a_doc_pusher.cache, terminator.cache] for k, v in cache.items()}
    verify_changes(user_no_channels, expected_num_docs=num_docs + 1, expected_num_revisions=0, expected_docs=expected_docs)

    # TODO: Fix this inconsistency suite wide
    # Longpoll docs do not save termination doc
    log_info("Verify docs in longpoll changes are the expected docs")
    verify_same_docs(num_docs, longpoll_docs, a_doc_pusher.cache)
def test_bucket_online_offline_resync_with_offline(params_from_base_test_setup, sg_conf_name, num_users, num_docs, num_revisions):
    start = time.time()

    cluster_conf = params_from_base_test_setup["cluster_config"]
    test_mode = params_from_base_test_setup["mode"]

    if test_mode == "di":
        pytest.skip("Unsupported feature in distributed index")

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, test_mode)

    log_info("Running 'test_bucket_online_offline_resync_with_offline'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using num_users: {}".format(num_users))
    log_info("Using num_docs: {}".format(num_docs))
    log_info("Using num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_conf)

    init_completed = time.time()
    log_info("Initialization completed. Time taken:{}s".format(init_completed - start))

    num_channels = 1
    channels = ["channel-" + str(i) for i in range(num_channels)]
    password = "******"

    sgs = cluster.sync_gateways

    admin = Admin(sgs[0])

    # Register users
    log_info("Register User")
    user_objects = admin.register_bulk_users(target=sgs[0], db="db", name_prefix="User", number=num_users, password=password, channels=channels)
    user_x = admin.register_user(target=sgs[0], db="db", name="User-X", password="******", channels=["channel_x"])

    # Add docs
    log_info("Add docs")
    in_parallel(user_objects, 'add_docs', num_docs)

    # Update docs
    log_info("Update docs")
    in_parallel(user_objects, 'update_docs', num_revisions)

    time.sleep(10)

    # Get changes for all users
    in_parallel(user_objects, 'get_changes')

    # every user should have the same number of docs
    # total/expected docs = num_users * num_docs
    recieved_docs = in_parallel(user_objects, 'get_num_docs')

    expected_docs = num_users * num_docs
    for user_obj, docs in recieved_docs.items():
        log_info('User {} got {} docs, expected docs: {}'.format(user_obj.name, docs, expected_docs))
        assert docs == expected_docs

    # Verify that
    # user-created doc ids exist in the docs received in the changes feed
    # expected revision is equal to received revision
    expected_revision = str(num_revisions + 1)

    docs_rev_dict = in_parallel(user_objects, 'get_num_revisions')
    rev_errors = []
    for user_obj, docs_revision_dict in docs_rev_dict.items():
        for doc_id in docs_revision_dict.keys():
            rev = docs_revision_dict[doc_id]
            log_info('User {} doc_id {} has {} revisions, expected revision: {}'.format(user_obj.name, doc_id, rev, expected_revision))
            if rev != expected_revision:
                rev_errors.append(doc_id)
                log_error('User {} doc_id {} got revision {}, expected revision {}'.format(user_obj.name, doc_id, rev, expected_revision))

    assert len(rev_errors) == 0

    # Verify that each user's created docs are part of the changes feed
    output = in_parallel(user_objects, 'check_doc_ids_in_changes_feed')
    assert True in output.values()

    # Take "db" offline
    status = admin.take_db_offline(db="db")
    assert status == 200

    sg_restart_config = sync_gateway_config_path_for_mode("bucket_online_offline/db_online_offline_access_restricted", test_mode)
    restart_status = cluster.sync_gateways[0].restart(sg_restart_config)
    assert restart_status == 0

    log_info("Sleeping....")
    time.sleep(10)

    pool = ThreadPool(processes=1)

    log_info("Restarted SG....")
    time.sleep(5)

    db_info = admin.get_db_info("db")
    log_info("Status of db = {}".format(db_info["state"]))
    assert db_info["state"] == "Offline"

    try:
        async_resync_result = pool.apply_async(admin.db_resync, ("db",))
        log_info("resync issued !!!!!!")
    except Exception as e:
        log_info("Catch resync exception: {}".format(e))

    time.sleep(1)

    resync_occured = False
    for i in range(20):
        db_info = admin.get_db_info("db")
        log_info("Status of db = {}".format(db_info["state"]))
        if db_info["state"] == "Resyncing":
            resync_occured = True
            log_info("Resync occurred")
            try:
                status = admin.get_db_info(db="db")
                log_info("Got db_info request status: {}".format(status))
            except HTTPError as e:
                log_info("status = {} exception = {}".format(status, e.response.status_code))
                assert False
            else:
                log_info("Got 200 ok for supported operation")

        time.sleep(1)
        if resync_occured:
            break

    time.sleep(10)

    status = admin.bring_db_online(db="db")
    log_info("online request issued !!!!! response status: {}".format(status))

    time.sleep(5)
    db_info = admin.get_db_info("db")
    log_info("Status of db = {}".format(db_info["state"]))
    assert db_info["state"] == "Online"

    resync_result = async_resync_result.get()
    log_info("resync_changes {}".format(resync_result))
    log_info("expecting num_changes == num_docs {} * num_users {}".format(num_docs, num_users))
    assert resync_result['payload']['changes'] == num_docs * num_users
    assert resync_result['status_code'] == 200

    time.sleep(5)
    global_cache = list()
    for user in user_objects:
        global_cache.append(user.cache)

    all_docs = {k: v for user_cache in global_cache for k, v in user_cache.items()}

    verify_changes(user_x, expected_num_docs=expected_docs, expected_num_revisions=num_revisions, expected_docs=all_docs)

    end = time.time()
    log_info("Test ended.")
    log_info("Main test duration: {}".format(end - init_completed))
    log_info("Test setup time: {}".format(init_completed - start))
    log_info("Total Time taken: {}s".format(end - start))
def test_online_to_offline_changes_feed_controlled_close_continuous(params_from_base_test_setup, sg_conf_name, num_docs):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])

    seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth", password="******", channels=["ABC"])
    doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="doc_pusher", password="******", channels=["ABC"])

    docs_in_changes = dict()
    doc_add_errors = list()

    with concurrent.futures.ThreadPoolExecutor(max_workers=libraries.testkit.settings.MAX_REQUEST_WORKERS) as executor:
        futures = dict()
        futures[executor.submit(seth.start_continuous_changes_tracking, termination_doc_id=None)] = "continuous"
        futures[executor.submit(doc_pusher.add_docs, num_docs)] = "docs_push"
        time.sleep(5)
        futures[executor.submit(admin.take_db_offline, "db")] = "db_offline_task"

        for future in concurrent.futures.as_completed(futures):
            task_name = futures[future]

            if task_name == "db_offline_task":
                log_info("DB OFFLINE")
                # make sure db_offline returns 200
                assert future.result() == 200
            elif task_name == "docs_push":
                log_info("DONE PUSHING DOCS")
                doc_add_errors = future.result()
            elif task_name == "continuous":
                docs_in_changes = future.result()
                log_info("DOCS FROM CHANGES")
                for k, v in docs_in_changes.items():
                    log_info("DFC -> {}:{}".format(k, v))

    log_info("Number of docs from _changes ({})".format(len(docs_in_changes)))
    log_info("Number of docs add errors ({})".format(len(doc_add_errors)))

    # Some docs should have made it to _changes
    assert len(docs_in_changes) > 0

    # Bring db back online
    status = admin.bring_db_online("db")
    assert status == 200

    # Get all docs that have been pushed
    # Verify that changes returns all of them
    all_docs = doc_pusher.get_all_docs()
    num_docs_pushed = len(all_docs["rows"])
    verify_changes(doc_pusher, expected_num_docs=num_docs_pushed, expected_num_revisions=0, expected_docs=doc_pusher.cache)

    # The number of errors returned when pushing while the db was offline, plus the number
    # of docs that made it into the db, should equal the total number of docs
    assert num_docs_pushed + len(doc_add_errors) == num_docs
def test_online_to_offline_changes_feed_controlled_close_longpoll(params_from_base_test_setup, sg_conf_name, num_docs):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])

    seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth", password="******", channels=["ABC"])
    doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="doc_pusher", password="******", channels=["ABC"])

    docs_in_changes = dict()
    doc_add_errors = list()

    with concurrent.futures.ThreadPoolExecutor(max_workers=libraries.testkit.settings.MAX_REQUEST_WORKERS) as executor:
        futures = dict()
        futures[executor.submit(seth.start_longpoll_changes_tracking, termination_doc_id=None)] = "polling"
        futures[executor.submit(doc_pusher.add_docs, num_docs)] = "docs_push"
        time.sleep(5)
        futures[executor.submit(admin.take_db_offline, "db")] = "db_offline_task"

        for future in concurrent.futures.as_completed(futures):
            task_name = futures[future]

            if task_name == "db_offline_task":
                log_info("DB OFFLINE")
                # make sure db_offline returns 200
                assert future.result() == 200
            if task_name == "docs_push":
                log_info("DONE PUSHING DOCS")
                doc_add_errors = future.result()
            if task_name == "polling":
                # Long poll will exit with a 503; the docs are returned in the exception
                log_info("POLLING DONE")
                try:
                    docs_in_changes = future.result()
                except Exception as e:
                    log_info(e)
                    log_info("POLLING DONE EXCEPTION")
                    log_info("ARGS: {}".format(e.args))
                    docs_in_changes = e.args[0]["docs"]
                    last_seq_num = e.args[0]["last_seq_num"]
                    log_info("DOCS FROM longpoll")
                    for k, v in docs_in_changes.items():
                        log_info("DFC -> {}:{}".format(k, v))
                    log_info("LAST_SEQ_NUM FROM longpoll {}".format(last_seq_num))

    log_info("Number of docs from _changes ({})".format(len(docs_in_changes)))
    log_info("last_seq_num _changes ({})".format(last_seq_num))
    log_info("Number of docs add errors ({})".format(len(doc_add_errors)))

    # Some docs should have made it to _changes
    assert len(docs_in_changes) > 0

    # Make sure some docs failed due to the db being taken offline
    assert len(doc_add_errors) > 0

    seq_num_component = last_seq_num.split("-")
    if mode == "cc":
        # Assert that last_seq_num == number of _changes + 2
        # (the _user doc starts at seq 1 and the docs start at the _user doc seq + 2)
        assert len(docs_in_changes) + 2 == int(seq_num_component[0])
    else:
        # assert the value is not an empty string
        assert last_seq_num != ""

    # Bring db back online
    status = admin.bring_db_online("db")
    assert status == 200

    # Get all docs that have been pushed
    # Verify that changes returns all of them
    all_docs = doc_pusher.get_all_docs()
    num_docs_pushed = len(all_docs["rows"])
    verify_changes(doc_pusher, expected_num_docs=num_docs_pushed, expected_num_revisions=0, expected_docs=doc_pusher.cache)

    # The number of errors returned when pushing while the db was offline, plus the number
    # of docs that made it into the db, should equal the total number of docs
    assert num_docs_pushed + len(doc_add_errors) == num_docs
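# The cc-mode assertion above relies on last_seq looking like "<int>-0", whereas di
# mode produces hashed values such as "12313-0::1023.15". A minimal sketch of a
# helper that extracts the integer component of a cc-mode last_seq; the name
# cc_seq_as_int is a hypothetical illustration, not part of the test suite.
def cc_seq_as_int(last_seq_num):
    """Return the leading integer of a cc-mode sequence string like '42-0'."""
    return int(last_seq_num.split("-")[0])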
def rest_scan(sync_gateway, db, online, num_docs, user_name, channels): # Missing ADMIN # TODO: GET /{db}/_session/{session-id} # TODO: POST /{db}/_session # TODO: DELETE /{db}/_session/{session-id} # TODO: DELETE /{db}/_user/{name}/_session/{session-id} # TODO: DELETE /{db}/_user/{name}/_session # TODO: DELETE /{db}/_user/{name} # TODO: POST /{db}/_role/ # TODO: DELETE /{db}/_role/{name} # Missing REST # TODO: POST /{db}/_all_docs # TODO: DELETE /{db}/{doc} # TODO: PUT /{db}/{doc}/{attachment} # TODO: GET /{db}/{doc}/{attachment} # Missing Local Document # TODO: DELETE /{db}/{local-doc-id} # Missing Authentication # TODO: POST /{db}/_facebook_token admin = Admin(sync_gateway=sync_gateway) error_responses = list() # PUT /{db}/_role/{name} try: admin.create_role(db=db, name="radio_stations", channels=["HWOD", "KDWB"]) except HTTPError as e: log_info((e.response.url, e.response.status_code)) error_responses.append((e.response.url, e.response.status_code)) # GET /{db}/_role try: roles = admin.get_roles(db=db) log_info(roles) except HTTPError as e: log_info((e.response.url, e.response.status_code)) error_responses.append((e.response.url, e.response.status_code)) # GET /{db}/_role/{name} try: role = admin.get_role(db=db, name="radio_stations") log_info(role) except HTTPError as e: log_info((e.response.url, e.response.status_code)) error_responses.append((e.response.url, e.response.status_code)) # PUT /{db}/_user/{name} try: user = admin.register_user(target=sync_gateway, db=db, name=user_name, password="******", channels=channels) except HTTPError as e: log_info((e.response.url, e.response.status_code)) error_responses.append((e.response.url, e.response.status_code)) # GET /{db}/_user try: users_info = admin.get_users_info(db=db) log_info(users_info) except HTTPError as e: log_info((e.response.url, e.response.status_code)) error_responses.append((e.response.url, e.response.status_code)) # GET /{db}/_user/{name} try: user_info = admin.get_user_info(db=db, name=user_name) log_info(user_info) except HTTPError as e: log_info((e.response.url, e.response.status_code)) error_responses.append((e.response.url, e.response.status_code)) # GET /{db} try: db_info = admin.get_db_info(db=db) if not online: assert db_info["state"] == "Offline" else: assert db_info["state"] == "Online" log_info(db_info) except HTTPError as e: log_info((e.response.url, e.response.status_code)) error_responses.append((e.response.url, e.response.status_code)) # Create dummy user to hit endpoint if offline, user creation above will fail if not online: user = User(target=sync_gateway, db=db, name=user_name, password="******", channels=channels) # PUT /{db}/{name} add_docs_errors = user.add_docs(num_docs=num_docs) error_responses.extend(add_docs_errors) # POST /{db}/_bulk_docs bulk_doc_errors = user.add_docs(num_docs=num_docs, bulk=True) error_responses.extend(bulk_doc_errors) # POST /{db}/ for i in range(num_docs): try: user.add_doc() except HTTPError as e: log_info((e.response.url, e.response.status_code)) error_responses.append((e.response.url, e.response.status_code)) # GET /{db}/{name} # PUT /{db}/{name} if online: update_docs_errors = user.update_docs(num_revs_per_doc=1) error_responses.extend(update_docs_errors) else: try: # Try to hit the GET enpoint for "test-id" user.update_doc("test-id") except HTTPError as e: log_info((e.response.url, e.response.status_code)) error_responses.append((e.response.url, e.response.status_code)) # PUT /{db}/{local-doc-id} local_doc_id = uuid.uuid4() try: doc = 
user.add_doc("_local/{}".format(local_doc_id), content={"message": "I should not be replicated"}) except HTTPError as e: log_info((e.response.url, e.response.status_code)) error_responses.append((e.response.url, e.response.status_code)) # GET /{db}/{local-doc-id} try: doc = user.get_doc("_local/{}".format(local_doc_id)) assert doc["content"]["message"] == "I should not be replicated" except HTTPError as e: log_info((e.response.url, e.response.status_code)) error_responses.append((e.response.url, e.response.status_code)) # GET /{db}/_all_docs try: all_docs_result = user.get_all_docs() # num_docs /{db}/{doc} PUT + num_docs /{db}/_bulk_docs + num_docs POST /{db}/ assert len(all_docs_result["rows"]) == num_docs * 3 except HTTPError as e: log_info((e.response.url, e.response.status_code)) error_responses.append((e.response.url, e.response.status_code)) # POST /{db}/_bulk_get try: doc_ids = list(user.cache.keys()) first_ten_ids = doc_ids[:10] first_ten = user.get_docs(first_ten_ids) assert len(first_ten) == 10 except HTTPError as e: log_info((e.response.url, e.response.status_code)) error_responses.append((e.response.url, e.response.status_code)) # wait for changes time.sleep(2) # GET /{db}/_changes try: user.get_changes() # If successful, verify the _changes feed verify_changes(user, expected_num_docs=num_docs * 3, expected_num_revisions=1, expected_docs=user.cache) except HTTPError as e: log_info((e.response.url, e.response.status_code)) error_responses.append((e.response.url, e.response.status_code)) return error_responses
def test_issue_1524(params_from_base_test_setup, sg_conf_name, num_docs): cluster_conf = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode) log_info("Running 'issue_1524'") log_info("Using cluster_conf: {}".format(cluster_conf)) log_info("Using sg_conf: {}".format(sg_conf)) log_info("Using num_docs: {}".format(num_docs)) cluster = Cluster(config=cluster_conf) cluster.reset(sg_config_path=sg_conf) admin = Admin(cluster.sync_gateways[0]) user_no_channels = admin.register_user(target=cluster.sync_gateways[0], db="db", name="user_no_channels", password="******") a_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="a_doc_pusher", password="******", channels=["A"]) access_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="access_doc_pusher", password="******") terminator = admin.register_user(target=cluster.sync_gateways[0], db="db", name="terminator", password="******", channels=["A"]) longpoll_docs = {} with concurrent.futures.ThreadPoolExecutor( max_workers=libraries.testkit.settings.MAX_REQUEST_WORKERS ) as executor: futures = dict() futures[executor.submit( user_no_channels.start_longpoll_changes_tracking, termination_doc_id="terminator")] = "polling" log_info("Starting longpoll feed") futures[executor.submit(a_doc_pusher.add_docs, num_docs=num_docs, bulk=True, name_prefix="a-doc")] = "a_docs_pushed" log_info("'A' channel docs pushing") for future in concurrent.futures.as_completed(futures): task_name = futures[future] if task_name == "a_docs_pushed": log_info("'A' channel docs pushed") time.sleep(5) log_info( "Grant 'user_no_channels' access to channel 'A' via sync function" ) access_doc_pusher.add_doc(doc_id="access_doc", content={ "accessUser": "******", "accessChannels": ["A"] }) time.sleep(5) log_info("'terminator' pushing termination doc") terminator.add_doc(doc_id="terminator") if task_name == "polling": log_info("Getting changes from longpoll") longpoll_docs, last_seq = future.result() log_info( "Verify docs in longpoll changes are the expected docs") log_info( "Verifying 'user_no_channels' has same docs as 'a_doc_pusher' + access_doc" ) # One off changes verification will include the termination doc expected_docs = { k: v for cache in [a_doc_pusher.cache, terminator.cache] for k, v in cache.items() } verify_changes(user_no_channels, expected_num_docs=num_docs + 1, expected_num_revisions=0, expected_docs=expected_docs) # TODO: Fix this inconsistency suite wide # Longpoll docs do not save termination doc log_info("Verify docs in longpoll changes are the expected docs") verify_same_docs(num_docs, longpoll_docs, a_doc_pusher.cache)
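# For reference: test_issue_1524 relies on a channel access grant made from the sync
# function. Pushing "access_doc" causes the (config-defined) sync function to call
# access(doc.accessUser, doc.accessChannels), which is what moves the channel-less user
# onto channel "A" mid-longpoll. Sketch of the raw doc write that triggers the grant
# (placeholder host and credentials; field names mirror the access_doc body in the test):
import requests

def push_access_doc(db, user_to_grant, channels, auth):
    body = {"accessUser": user_to_grant, "accessChannels": channels}
    resp = requests.put("http://localhost:4984/{}/access_doc".format(db),
                        json=body, auth=auth)
    resp.raise_for_status()
    return resp.json()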
def test_sync_role_sanity(params_from_base_test_setup, sg_conf_name): num_docs_per_channel = 100 tv_channels = ["ABC", "NBC", "CBS"] cluster_conf = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode) log_info("Running 'sync_role_sanity'") log_info("Using cluster_conf: {}".format(cluster_conf)) log_info("Using sg_conf: {}".format(sg_conf)) cluster = Cluster(config=cluster_conf) cluster.reset(sg_config_path=sg_conf) admin = Admin(cluster.sync_gateways[0]) admin.create_role(db="db", name="tv_stations", channels=tv_channels) seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth", password="******") doc_pushers = [] doc_pusher_caches = [] # Push docs to each tv channel for tv_channel in tv_channels: doc_pusher = admin.register_user( target=cluster.sync_gateways[0], db="db", name="{}_doc_pusher".format(tv_channel), password="******", channels=[tv_channel]) doc_pusher.add_docs(num_docs_per_channel, bulk=True) doc_pushers.append(doc_pusher) doc_pusher_caches.append(doc_pusher.cache) # Before role access grant verify_changes(seth, expected_num_docs=0, expected_num_revisions=0, expected_docs={}) # Create access doc pusher and grant seth the tv_stations role via the sync function access_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="access_doc_pusher", password="******", channels=["access"]) access_doc_pusher.add_doc(doc_id="access_doc", content={"grant_access": "true"}) # Allow docs to backfill time.sleep(5) all_tv_docs = { k: v for cache in doc_pusher_caches for k, v in cache.items() } verify_changes(seth, expected_num_docs=num_docs_per_channel * len(tv_channels), expected_num_revisions=0, expected_docs=all_tv_docs) # Remove seth from tv_stations role access_doc_pusher.update_doc(doc_id="access_doc", content={"grant_access": "false"}) # Allow docs to backfill time.sleep(5) # Verify seth sees no tv_stations channel docs verify_changes(seth, expected_num_docs=0, expected_num_revisions=0, expected_docs={}) # Push more docs to the tv channels for doc_pusher in doc_pushers: doc_pusher.add_docs(num_docs_per_channel, bulk=True) # Allow docs to backfill time.sleep(5) # Verify seth sees no tv_stations channel docs verify_changes(seth, expected_num_docs=0, expected_num_revisions=0, expected_docs={})
def test_roles_sanity(params_from_base_test_setup, sg_conf_name): cluster_conf = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode) log_info("Running 'roles_sanity'") log_info("cluster_conf: {}".format(cluster_conf)) log_info("sg_conf: {}".format(sg_conf)) cluster = Cluster(config=cluster_conf) cluster.reset(sg_config_path=sg_conf) radio_stations = ["KMOW", "HWOD", "KDWB"] tv_stations = ["ABC", "CBS", "NBC"] number_of_djs = 10 number_of_vjs = 10 number_of_docs_per_pusher = 500 admin = Admin(cluster.sync_gateways[0]) admin.create_role("db", name="radio_stations", channels=radio_stations) admin.create_role("db", name="tv_stations", channels=tv_stations) djs = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db", name_prefix="dj", number=number_of_djs, password="******", roles=["radio_stations"]) vjs = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db", name_prefix="vj", number=number_of_vjs, password="******", roles=["tv_stations"]) mogul = admin.register_user(target=cluster.sync_gateways[0], db="db", name="mogul", password="******", roles=["tv_stations", "radio_stations"]) radio_doc_caches = [] for radio_station in radio_stations: doc_pusher = admin.register_user( target=cluster.sync_gateways[0], db="db", name="{}_doc_pusher".format(radio_station), password="******", channels=[radio_station]) doc_pusher.add_docs(number_of_docs_per_pusher, bulk=True) radio_doc_caches.append(doc_pusher.cache) radio_docs = {k: v for cache in radio_doc_caches for k, v in cache.items()} tv_doc_caches = [] for tv_station in tv_stations: doc_pusher = admin.register_user( target=cluster.sync_gateways[0], db="db", name="{}_doc_pusher".format(tv_station), password="******", channels=[tv_station]) doc_pusher.add_docs(number_of_docs_per_pusher, bulk=True) tv_doc_caches.append(doc_pusher.cache) tv_docs = {k: v for cache in tv_doc_caches for k, v in cache.items()} # Verify djs get docs for all the channels associated with the radio_stations role expected_num_radio_docs = len(radio_stations) * number_of_docs_per_pusher verify_changes(djs, expected_num_docs=expected_num_radio_docs, expected_num_revisions=0, expected_docs=radio_docs) # Verify vjs get docs for all the channels associated with the tv_stations role expected_num_tv_docs = len(tv_stations) * number_of_docs_per_pusher verify_changes(vjs, expected_num_docs=expected_num_tv_docs, expected_num_revisions=0, expected_docs=tv_docs) # Verify mogul gets docs for all the channels associated with the radio_stations + tv_stations roles all_docs_caches = list(radio_doc_caches) all_docs_caches.extend(tv_doc_caches) all_docs = {k: v for cache in all_docs_caches for k, v in cache.items()} verify_changes(mogul, expected_num_docs=expected_num_radio_docs + expected_num_tv_docs, expected_num_revisions=0, expected_docs=all_docs)
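# For reference: the role plumbing above presumably maps onto two admin-port calls:
# PUT /{db}/_role/{name} to define the role's admin channels, and PUT /{db}/_user/{name}
# with "admin_roles" to place a user in the role. A rough sketch (placeholder host,
# assumed default admin port 4985; not the testkit Admin helper):
import requests

SG_ADMIN = "http://localhost:4985"  # placeholder host, assumed default admin port

def create_role_and_user(db, role, channels, user, password):
    # Define the role and the channels it grants
    resp = requests.put("{}/{}/_role/{}".format(SG_ADMIN, db, role),
                        json={"admin_channels": channels})
    resp.raise_for_status()
    # Create a user who inherits the role's channels
    resp = requests.put("{}/{}/_user/{}".format(SG_ADMIN, db, user),
                        json={"password": password, "admin_roles": [role]})
    resp.raise_for_status()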
def test_continuous_changes_sanity(params_from_base_test_setup, sg_conf_name, num_docs, num_revisions): cluster_conf = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode) log_info("Running 'continuous_changes_sanity'") log_info("cluster_conf: {}".format(cluster_conf)) log_info("sg_conf: {}".format(sg_conf)) log_info("num_docs: {}".format(num_docs)) log_info("num_revisions: {}".format(num_revisions)) cluster = Cluster(config=cluster_conf) cluster.reset(sg_config_path=sg_conf) admin = Admin(cluster.sync_gateways[0]) seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth", password="******", channels=["ABC", "TERMINATE"]) abc_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="abc_doc_pusher", password="******", channels=["ABC"]) doc_terminator = admin.register_user(target=cluster.sync_gateways[0], db="db", name="doc_terminator", password="******", channels=["TERMINATE"]) docs_in_changes = dict() with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor: futures = dict() futures[executor.submit( seth.start_continuous_changes_tracking, termination_doc_id="killcontinuous")] = "continuous" futures[executor.submit(abc_doc_pusher.add_docs, num_docs)] = "doc_pusher" for future in concurrent.futures.as_completed(futures): task_name = futures[future] # Once the doc pusher finishes, update the docs and send the termination doc to seth's continuous changes feed if task_name == "doc_pusher": abc_doc_pusher.update_docs(num_revs_per_doc=num_revisions) time.sleep(5) doc_terminator.add_doc("killcontinuous") elif task_name == "continuous": docs_in_changes = future.result() # Verify abc_doc_pusher's changes feed contains all of its docs with the expected revisions verify_changes(abc_doc_pusher, expected_num_docs=num_docs, expected_num_revisions=num_revisions, expected_docs=abc_doc_pusher.cache) # Verify the docs captured from seth's continuous feed match abc_doc_pusher's docs verify_same_docs(expected_num_docs=num_docs, doc_dict_one=docs_in_changes, doc_dict_two=abc_doc_pusher.cache)
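# For reference: the continuous tracking above is, at the REST level, GET /{db}/_changes with
# feed=continuous, which streams one JSON object per line until the client stops reading.
# A minimal sketch of consuming the stream until a termination doc id shows up (placeholder
# host and credentials; the testkit helper adds caching and error handling on top):
import json
import requests

def follow_continuous_changes(db, auth, termination_doc_id):
    seen = {}
    resp = requests.get("http://localhost:4984/{}/_changes".format(db),
                        params={"feed": "continuous"}, auth=auth, stream=True)
    for line in resp.iter_lines():
        if not line:
            continue  # skip keep-alive blank lines
        change = json.loads(line)
        doc_id = change.get("id")
        if doc_id is None:
            continue  # e.g. a trailing last_seq entry
        seen[doc_id] = change.get("changes", [])
        if doc_id == termination_doc_id:
            break
    resp.close()
    return seen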
def test_overloaded_channel_cache(params_from_base_test_setup, sg_conf_name, num_docs, user_channels, filter, limit): cluster_conf = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] if mode == "di": pytest.skip("Unsupported feature in distributed index") sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode) log_info("Running 'test_overloaded_channel_cache'") log_info("Using cluster_conf: {}".format(cluster_conf)) log_info("Using sg_conf: {}".format(sg_conf)) log_info("Using num_docs: {}".format(num_docs)) log_info("Using user_channels: {}".format(user_channels)) log_info("Using filter: {}".format(filter)) log_info("Using limit: {}".format(limit)) cluster = Cluster(config=cluster_conf) cluster.reset(sg_config_path=sg_conf) target_sg = cluster.sync_gateways[0] admin = Admin(target_sg) users = admin.register_bulk_users(target_sg, "db", "user", 1000, "password", [user_channels]) assert len(users) == 1000 doc_pusher = admin.register_user(target_sg, "db", "abc_doc_pusher", "password", ["ABC"]) doc_pusher.add_docs(num_docs, bulk=True) # Give a few seconds to let changes register time.sleep(2) start = time.time() with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor: changes_requests = [] errors = [] for user in users: if filter and limit is not None: changes_requests.append(executor.submit(user.get_changes, since=0, limit=limit, filter="sync_gateway/bychannel", channels=["ABC"])) elif filter and limit is None: changes_requests.append(executor.submit(user.get_changes, filter="sync_gateway/bychannel", channels=["ABC"])) elif not filter and limit is not None: changes_requests.append(executor.submit(user.get_changes, limit=limit)) elif not filter and limit is None: changes_requests.append(executor.submit(user.get_changes)) for future in concurrent.futures.as_completed(changes_requests): changes = future.result() if limit is not None: assert len(changes["results"]) == 50 else: assert len(changes["results"]) == 5001 # changes feed should all be successful log_info(len(errors)) assert len(errors) == 0 if limit is not None: # HACK: Should complete quickly (well under the 120s limit) unless blocking on view calls end = time.time() time_for_users_to_get_all_changes = end - start log_info("Time for users to get all changes: {}".format(time_for_users_to_get_all_changes)) assert time_for_users_to_get_all_changes < 120, "Time to get all changes exceeded the 120s limit: {}s".format( time_for_users_to_get_all_changes ) # Sanity check that a subset of users have _changes feed intact for i in range(10): verify_changes(users[i], expected_num_docs=num_docs, expected_num_revisions=0, expected_docs=doc_pusher.cache) # Get sync_gateway expvars resp = requests.get(url="http://{}:4985/_expvar".format(target_sg.ip)) resp.raise_for_status() resp_obj = resp.json() if user_channels == "*" and num_docs == 5000: # "*" channel includes _user docs so the verify_changes will result in 10 view queries assert resp_obj["syncGateway_changeCache"]["view_queries"] == 10 else: # If number of view queries == 0 the key will not exist in the expvars assert "view_queries" not in resp_obj["syncGateway_changeCache"]
def test_bucket_online_offline_resync_sanity(params_from_base_test_setup, sg_conf_name, num_users, num_docs, num_revisions): cluster_conf = params_from_base_test_setup["cluster_config"] test_mode = params_from_base_test_setup["mode"] if test_mode == "di": pytest.skip("Unsupported feature in distributed index") sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, test_mode) log_info("Running 'test_bucket_online_offline_resync_sanity'") log_info("Using cluster_conf: {}".format(cluster_conf)) log_info("Using num_users: {}".format(num_users)) log_info("Using num_docs: {}".format(num_docs)) log_info("Using num_revisions: {}".format(num_revisions)) start = time.time() cluster = Cluster(config=cluster_conf) cluster.reset(sg_conf) init_completed = time.time() log_info("Initialization completed. Time taken:{}s".format(init_completed - start)) num_channels = 1 channels = ["channel-" + str(i) for i in range(num_channels)] password = "******" sgs = cluster.sync_gateways admin = Admin(sgs[0]) # Register User log_info("Register User") user_objects = admin.register_bulk_users(target=sgs[0], db="db", name_prefix="User", number=num_users, password=password, channels=channels) user_x = admin.register_user(target=sgs[0], db="db", name="User-X", password="******", channels=["channel_x"]) # Add User log_info("Add docs") in_parallel(user_objects, 'add_docs', num_docs) # Update docs log_info("Update docs") in_parallel(user_objects, 'update_docs', num_revisions) time.sleep(10) # Get changes for all users in_parallel(user_objects, 'get_changes') # every user should have same number of docs # total/expected docs = num_users * num_docs recieved_docs = in_parallel(user_objects, 'get_num_docs') expected_docs = num_users * num_docs for user_obj, docs in recieved_docs.items(): log_info('User {} got {} docs, expected docs: {}'.format(user_obj.name, docs, expected_docs)) assert docs == expected_docs # Verify that # user created doc-ids exist in docs received in changes feed # expected revision is equal to received revision expected_revision = str(num_revisions + 1) docs_rev_dict = in_parallel(user_objects, 'get_num_revisions') rev_errors = [] for user_obj, docs_revision_dict in docs_rev_dict.items(): for doc_id in docs_revision_dict.keys(): rev = docs_revision_dict[doc_id] log_info('User {} doc_id {} has {} revisions, expected revision: {}'.format(user_obj.name, doc_id, rev, expected_revision)) if rev != expected_revision: rev_errors.append(doc_id) log_error('User {} doc_id {} got revision {}, expected revision {}'.format( user_obj.name, doc_id, rev, expected_revision) ) assert len(rev_errors) == 0 # Verify each User created docs are part of changes feed output = in_parallel(user_objects, 'check_doc_ids_in_changes_feed') assert True in output.values() # Take "db" offline status = admin.take_db_offline(db="db") assert status == 200 sg_restart_config = sync_gateway_config_path_for_mode("bucket_online_offline/db_online_offline_access_restricted", test_mode) restart_status = cluster.sync_gateways[0].restart(sg_restart_config) assert restart_status == 0 time.sleep(10) num_changes = admin.db_resync(db="db") log_info("expecting num_changes {} == num_docs {} * num_users {}".format(num_changes, num_docs, num_users)) assert num_changes['payload']['changes'] == num_docs * num_users status = admin.bring_db_online(db="db") assert status == 200 time.sleep(5) global_cache = list() for user in user_objects: global_cache.append(user.cache) all_docs = {k: v for user_cache in global_cache for k, v in user_cache.items()} 
verify_changes(user_x, expected_num_docs=expected_docs, expected_num_revisions=num_revisions, expected_docs=all_docs) end = time.time() log_info("Test ended.") log_info("Main test duration: {}".format(end - init_completed)) log_info("Test setup time: {}".format(init_completed - start)) log_info("Total Time taken: {}s".format(end - start))
def test_bucket_online_offline_resync_with_offline(params_from_base_test_setup, sg_conf_name, num_users, num_docs, num_revisions): start = time.time() cluster_conf = params_from_base_test_setup["cluster_config"] test_mode = params_from_base_test_setup["mode"] if test_mode == "di": pytest.skip("Unsupported feature in distributed index") sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, test_mode) log_info("Running 'test_bucket_online_offline_resync_with_offline'") log_info("Using cluster_conf: {}".format(cluster_conf)) log_info("Using num_users: {}".format(num_users)) log_info("Using num_docs: {}".format(num_docs)) log_info("Using num_revisions: {}".format(num_revisions)) cluster = Cluster(config=cluster_conf) cluster.reset(sg_conf) init_completed = time.time() log_info("Initialization completed. Time taken:{}s".format(init_completed - start)) num_channels = 1 channels = ["channel-" + str(i) for i in range(num_channels)] password = "******" sgs = cluster.sync_gateways admin = Admin(sgs[0]) # Register User log_info("Register User") user_objects = admin.register_bulk_users(target=sgs[0], db="db", name_prefix="User", number=num_users, password=password, channels=channels) user_x = admin.register_user(target=sgs[0], db="db", name="User-X", password="******", channels=["channel_x"]) # Add docs log_info("Add docs") in_parallel(user_objects, 'add_docs', num_docs) # Update docs log_info("Update docs") in_parallel(user_objects, 'update_docs', num_revisions) time.sleep(10) # Get changes for all users in_parallel(user_objects, 'get_changes') # every user should have same number of docs # total/expected docs = num_users * num_docs received_docs = in_parallel(user_objects, 'get_num_docs') expected_docs = num_users * num_docs for user_obj, docs in received_docs.items(): log_info('User {} got {} docs, expected docs: {}'.format( user_obj.name, docs, expected_docs)) assert docs == expected_docs # Verify that # user created doc-ids exist in docs received in changes feed # expected revision is equal to received revision expected_revision = str(num_revisions + 1) docs_rev_dict = in_parallel(user_objects, 'get_num_revisions') rev_errors = [] for user_obj, docs_revision_dict in docs_rev_dict.items(): for doc_id in docs_revision_dict.keys(): rev = docs_revision_dict[doc_id] log_info( 'User {} doc_id {} has {} revisions, expected revision: {}'. format(user_obj.name, doc_id, rev, expected_revision)) if rev != expected_revision: rev_errors.append(doc_id) log_error( 'User {} doc_id {} got revision {}, expected revision {}'. 
format(user_obj.name, doc_id, rev, expected_revision)) assert len(rev_errors) == 0 # Verify each User created docs are part of changes feed output = in_parallel(user_objects, 'check_doc_ids_in_changes_feed') assert True in output.values() # Take "db" offline sg_client = MobileRestClient() status = sg_client.take_db_offline(cluster_conf=cluster_conf, db="db") assert status == 0 sg_restart_config = sync_gateway_config_path_for_mode( "bucket_online_offline/db_online_offline_access_restricted", test_mode) restart_status = cluster.sync_gateways[0].restart(sg_restart_config) assert restart_status == 0 log_info("Sleeping....") time.sleep(10) pool = ThreadPool(processes=1) log_info("Restarted SG....") time.sleep(5) db_info = admin.get_db_info("db") log_info("Status of db = {}".format(db_info["state"])) assert db_info["state"] == "Offline" try: async_resync_result = pool.apply_async(admin.db_resync, ("db", )) log_info("resync issued !!!!!!") except Exception as e: log_info("Catch resync exception: {}".format(e)) time.sleep(1) resync_occured = False for i in range(20): db_info = admin.get_db_info("db") log_info("Status of db = {}".format(db_info["state"])) if db_info["state"] == "Resyncing": resync_occured = True log_info("Resync occured") try: status = admin.get_db_info(db="db") log_info("Got db_info request status: {}".format(status)) except HTTPError as e: log_info("status = {} exception = {}".format( status, e.response.status_code)) assert False else: log_info("Got 200 ok for supported operation") time.sleep(1) if resync_occured: break time.sleep(10) status = sg_client.bring_db_online(cluster_conf=cluster_conf, db="db") log_info("online request issued !!!!! response status: {}".format(status)) time.sleep(5) db_info = admin.get_db_info("db") log_info("Status of db = {}".format(db_info["state"])) assert db_info["state"] == "Online" resync_result = async_resync_result.get() log_info("resync_changes {}".format(resync_result)) log_info("expecting num_changes == num_docs {} * num_users {}".format( num_docs, num_users)) assert resync_result['payload']['changes'] == num_docs * num_users assert resync_result['status_code'] == 200 time.sleep(5) global_cache = list() for user in user_objects: global_cache.append(user.cache) all_docs = { k: v for user_cache in global_cache for k, v in user_cache.items() } verify_changes(user_x, expected_num_docs=expected_docs, expected_num_revisions=num_revisions, expected_docs=all_docs) end = time.time() log_info("Test ended.") log_info("Main test duration: {}".format(end - init_completed)) log_info("Test setup time: {}".format(init_completed - start)) log_info("Total Time taken: {}s".format(end - start))
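# For reference: the resync flow driven above is, at the admin REST level, take the db
# offline, POST /{db}/_resync (which blocks until reprocessing finishes, which is why the
# test issues it from a worker thread and polls the db state for "Resyncing"), then bring
# the db back online. A rough sketch, assuming the default admin port 4985 on a placeholder
# host (response shapes may vary by Sync Gateway version):
import requests

SG_ADMIN = "http://localhost:4985"  # placeholder host, assumed default admin port

def resync_db(db):
    requests.post("{}/{}/_offline".format(SG_ADMIN, db)).raise_for_status()
    resync_resp = requests.post("{}/{}/_resync".format(SG_ADMIN, db))
    resync_resp.raise_for_status()
    requests.post("{}/{}/_online".format(SG_ADMIN, db)).raise_for_status()
    return resync_resp.json()  # reported change count; the testkit wrapper nests this under 'payload'

def db_state(db):
    # GET /{db}/ on the admin port reports "Offline", "Resyncing", or "Online"
    resp = requests.get("{}/{}/".format(SG_ADMIN, db))
    resp.raise_for_status()
    return resp.json()["state"]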
def test_dcp_reshard_sync_gateway_comes_up(params_from_base_test_setup, sg_conf): cluster_conf = params_from_base_test_setup["cluster_config"] log_info("Running 'test_dcp_reshard_sync_gateway_comes_up'") log_info("cluster_conf: {}".format(cluster_conf)) log_info("sg_conf: {}".format(sg_conf)) cluster = Cluster(config=cluster_conf) cluster.reset(sg_config_path=sg_conf) stop_status = cluster.sg_accels[0].stop() assert stop_status == 0, "Failed to stop sg_accel" admin = Admin(cluster.sync_gateways[0]) traun = admin.register_user(target=cluster.sync_gateways[0], db="db", name="traun", password="******", channels=["ABC", "NBC", "CBS"]) seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth", password="******", channels=["FOX"]) log_info(">> Users added") with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor: futures = dict() time.sleep(5) log_info(">>> Adding Traun docs") # ABC, NBC, CBS futures[executor.submit(traun.add_docs, 6000)] = "traun" log_info(">>> Adding Seth docs") # FOX futures[executor.submit(seth.add_docs, 4000)] = "seth" # Bring up a sync_gateway accel up_status = cluster.sg_accels[0].start(sg_conf) assert up_status == 0 for future in concurrent.futures.as_completed(futures): tag = futures[future] log_info("{} Completed:".format(tag)) # TODO better way to do this time.sleep(120) verify_changes(traun, expected_num_docs=6000, expected_num_revisions=0, expected_docs=traun.cache) verify_changes(seth, expected_num_docs=4000, expected_num_revisions=0, expected_docs=seth.cache)
def test_bucket_online_offline_resync_sanity(params_from_base_test_setup, sg_conf_name, num_users, num_docs, num_revisions): cluster_conf = params_from_base_test_setup["cluster_config"] test_mode = params_from_base_test_setup["mode"] if test_mode == "di": pytest.skip("Unsupported feature in distributed index") sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, test_mode) log_info("Running 'test_bucket_online_offline_resync_sanity'") log_info("Using cluster_conf: {}".format(cluster_conf)) log_info("Using num_users: {}".format(num_users)) log_info("Using num_docs: {}".format(num_docs)) log_info("Using num_revisions: {}".format(num_revisions)) start = time.time() cluster = Cluster(config=cluster_conf) cluster.reset(sg_conf) init_completed = time.time() log_info("Initialization completed. Time taken:{}s".format(init_completed - start)) num_channels = 1 channels = ["channel-" + str(i) for i in range(num_channels)] password = "******" sgs = cluster.sync_gateways admin = Admin(sgs[0]) # Register User log_info("Register User") user_objects = admin.register_bulk_users(target=sgs[0], db="db", name_prefix="User", number=num_users, password=password, channels=channels) user_x = admin.register_user(target=sgs[0], db="db", name="User-X", password="******", channels=["channel_x"]) # Add User log_info("Add docs") in_parallel(user_objects, 'add_docs', num_docs) # Update docs log_info("Update docs") in_parallel(user_objects, 'update_docs', num_revisions) time.sleep(10) # Get changes for all users in_parallel(user_objects, 'get_changes') # every user should have same number of docs # total/expected docs = num_users * num_docs recieved_docs = in_parallel(user_objects, 'get_num_docs') expected_docs = num_users * num_docs for user_obj, docs in recieved_docs.items(): log_info('User {} got {} docs, expected docs: {}'.format( user_obj.name, docs, expected_docs)) assert docs == expected_docs # Verify that # user created doc-ids exist in docs received in changes feed # expected revision is equal to received revision expected_revision = str(num_revisions + 1) docs_rev_dict = in_parallel(user_objects, 'get_num_revisions') rev_errors = [] for user_obj, docs_revision_dict in docs_rev_dict.items(): for doc_id in docs_revision_dict.keys(): rev = docs_revision_dict[doc_id] log_info( 'User {} doc_id {} has {} revisions, expected revision: {}'. format(user_obj.name, doc_id, rev, expected_revision)) if rev != expected_revision: rev_errors.append(doc_id) log_error( 'User {} doc_id {} got revision {}, expected revision {}'. 
format(user_obj.name, doc_id, rev, expected_revision)) assert len(rev_errors) == 0 # Verify each User created docs are part of changes feed output = in_parallel(user_objects, 'check_doc_ids_in_changes_feed') assert True in output.values() # Take "db" offline sg_client = MobileRestClient() status = sg_client.take_db_offline(cluster_conf=cluster_conf, db="db") assert status == 0 sg_restart_config = sync_gateway_config_path_for_mode( "bucket_online_offline/db_online_offline_access_restricted", test_mode) restart_status = cluster.sync_gateways[0].restart(sg_restart_config) assert restart_status == 0 time.sleep(10) num_changes = admin.db_resync(db="db") log_info("expecting num_changes {} == num_docs {} * num_users {}".format( num_changes, num_docs, num_users)) assert num_changes['payload']['changes'] == num_docs * num_users # Take "db" online status = sg_client.bring_db_online(cluster_conf=cluster_conf, db="db") assert status == 0 time.sleep(5) global_cache = list() for user in user_objects: global_cache.append(user.cache) all_docs = { k: v for user_cache in global_cache for k, v in user_cache.items() } verify_changes(user_x, expected_num_docs=expected_docs, expected_num_revisions=num_revisions, expected_docs=all_docs) end = time.time() log_info("Test ended.") log_info("Main test duration: {}".format(end - init_completed)) log_info("Test setup time: {}".format(init_completed - start)) log_info("Total Time taken: {}s".format(end - start))
def test_multiple_users_multiple_channels(params_from_base_test_setup, sg_conf_name): cluster_conf = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode) log_info("Running 'multiple_users_multiple_channels'") log_info("cluster_conf: {}".format(cluster_conf)) log_info("conf: {}".format(sg_conf)) cluster = Cluster(config=cluster_conf) cluster.reset(sg_config_path=sg_conf) num_docs_seth = 1000 num_docs_adam = 2000 num_docs_traun = 3000 sgs = cluster.sync_gateways admin = Admin(sgs[0]) seth = admin.register_user(target=sgs[0], db="db", name="seth", password="******", channels=["ABC"]) adam = admin.register_user(target=sgs[0], db="db", name="adam", password="******", channels=["NBC", "CBS"]) traun = admin.register_user(target=sgs[0], db="db", name="traun", password="******", channels=["ABC", "NBC", "CBS"]) # TODO use bulk docs seth.add_docs(num_docs_seth) # ABC adam.add_docs(num_docs_adam) # NBC, CBS traun.add_docs(num_docs_traun) # ABC, NBC, CBS assert len(seth.cache) == num_docs_seth assert len(adam.cache) == num_docs_adam assert len(traun.cache) == num_docs_traun # discuss appropriate time with team time.sleep(10) # Seth should get docs from seth + traun seth_subset = [seth.cache, traun.cache] seth_expected_docs = { k: v for cache in seth_subset for k, v in cache.items() } verify_changes([seth], expected_num_docs=num_docs_seth + num_docs_traun, expected_num_revisions=0, expected_docs=seth_expected_docs) # Adam should get docs from adam + traun adam_subset = [adam.cache, traun.cache] adam_expected_docs = { k: v for cache in adam_subset for k, v in cache.items() } verify_changes([adam], expected_num_docs=num_docs_adam + num_docs_traun, expected_num_revisions=0, expected_docs=adam_expected_docs) # Traun should get docs from seth + adam + traun traun_subset = [seth.cache, adam.cache, traun.cache] traun_expected_docs = { k: v for cache in traun_subset for k, v in cache.items() } verify_changes([traun], expected_num_docs=num_docs_seth + num_docs_adam + num_docs_traun, expected_num_revisions=0, expected_docs=traun_expected_docs)
def test_overloaded_channel_cache(params_from_base_test_setup, sg_conf_name, num_docs, user_channels, filter, limit): """ The purpose of this test is to verify that channel cache backfill via view queries is working properly. It works by doing the following: - Set channel cache size in Sync Gateway config to a small number, eg, 750. This means that only 750 docs fit in the channel cache - Add a large number of docs, eg, 1000. - Issue a _changes request that will return all 1000 docs Expected behavior / Verification: - Since 1000 docs requested from changes feed, but only 750 docs fit in channel cache, then it will need to do a view query to get the remaining 250 changes - Verify that the changes feed returns all 1000 expected docs - Check the expvar statistics to verify that view queries were made """ cluster_conf = params_from_base_test_setup["cluster_config"] mode = params_from_base_test_setup["mode"] if mode == "di": pytest.skip("Unsupported feature in distributed index") sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode) log_info("Running 'test_overloaded_channel_cache'") log_info("Using cluster_conf: {}".format(cluster_conf)) log_info("Using sg_conf: {}".format(sg_conf)) log_info("Using num_docs: {}".format(num_docs)) log_info("Using user_channels: {}".format(user_channels)) log_info("Using filter: {}".format(filter)) log_info("Using limit: {}".format(limit)) cluster = Cluster(config=cluster_conf) cluster.reset(sg_config_path=sg_conf) target_sg = cluster.sync_gateways[0] admin = Admin(target_sg) users = admin.register_bulk_users(target_sg, "db", "user", 1000, "password", [user_channels]) assert len(users) == 1000 doc_pusher = admin.register_user(target_sg, "db", "abc_doc_pusher", "password", ["ABC"]) doc_pusher.add_docs(num_docs, bulk=True) # Give a few seconds to let changes register time.sleep(2) start = time.time() # This uses a ProcessPoolExecutor due to https://github.com/couchbaselabs/mobile-testkit/issues/1142 with concurrent.futures.ProcessPoolExecutor(max_workers=100) as executor: changes_requests = [] errors = [] for user in users: if filter and limit is not None: changes_requests.append(executor.submit(user.get_changes, since=0, limit=limit, filter="sync_gateway/bychannel", channels=["ABC"])) elif filter and limit is None: changes_requests.append(executor.submit(user.get_changes, filter="sync_gateway/bychannel", channels=["ABC"])) elif not filter and limit is not None: changes_requests.append(executor.submit(user.get_changes, limit=limit)) elif not filter and limit is None: changes_requests.append(executor.submit(user.get_changes)) for future in concurrent.futures.as_completed(changes_requests): changes = future.result() if limit is not None: assert len(changes["results"]) == 50 else: assert len(changes["results"]) == 5001 # changes feed should all be successful log_info(len(errors)) assert len(errors) == 0 if limit is not None: # HACK: Should be less than a minute unless blocking on view calls end = time.time() time_for_users_to_get_all_changes = end - start log_info("Time for users to get all changes: {}".format(time_for_users_to_get_all_changes)) assert time_for_users_to_get_all_changes < 240, "Time to get all changes was greater than 2 minutes: {}s".format( time_for_users_to_get_all_changes ) # Sanity check that a subset of users have _changes feed intact for i in range(10): verify_changes(users[i], expected_num_docs=num_docs, expected_num_revisions=0, expected_docs=doc_pusher.cache) # Get sync_gateway expvars resp = 
requests.get(url="http://{}:4985/_expvar".format(target_sg.ip)) resp.raise_for_status() resp_obj = resp.json() # Since Sync Gateway will need to issue view queries to handle _changes requests that don't # fit in the channel cache, we expect there to be several view queries assert resp_obj["syncGateway_changeCache"]["view_queries"] > 0
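# For reference: the expvar check above reads Sync Gateway's /_expvar admin endpoint. A small
# helper for the same check (the admin URL is a placeholder argument, e.g.
# "http://localhost:4985"; the nested key path mirrors the one asserted on above and may
# differ between Sync Gateway versions):
import requests

def channel_cache_view_queries(sg_admin_url):
    resp = requests.get("{}/_expvar".format(sg_admin_url))
    resp.raise_for_status()
    expvars = resp.json()
    # A missing key means no view queries were issued, matching the assertions above
    return expvars.get("syncGateway_changeCache", {}).get("view_queries", 0)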