def test_offline_false_config_rest(params_from_base_test_setup, sg_conf_name, num_docs):
    """With an offline=false config, the db should start Online and serve every REST endpoint."""
    cluster_config = params_from_base_test_setup["cluster_config"]
    run_mode = params_from_base_test_setup["mode"]
    sg_config = sync_gateway_config_path_for_mode(sg_conf_name, run_mode)

    log_info("Using cluster_conf: {}".format(cluster_config))
    log_info("Using sg_conf: {}".format(sg_config))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_config)
    cluster.reset(sg_config_path=sg_config)

    # All db endpoints should function as expected while Online.
    scan_errors = rest_scan(
        cluster.sync_gateways[0],
        db="db",
        online=True,
        num_docs=num_docs,
        user_name="seth",
        channels=["ABC"],
    )
    assert len(scan_errors) == 0

    # Scenario 4: every running sync_gateway must report the db as Online.
    for gateway in cluster.sync_gateways:
        gateway_admin = Admin(gateway)
        assert gateway_admin.get_db_info("db")["state"] == "Online"
def test_db_delayed_online(params_from_base_test_setup, sg_conf_name, num_docs):
    """Take 'db' offline, then bring it Online with a delay and verify the state transitions.

    Steps:
      1. Reset the cluster with the given Sync Gateway config.
      2. Take the db offline via the admin API and confirm the 'Offline' state.
      3. Issue a delayed (15s) online request on a worker thread.
      4. After the delay, confirm the 'Online' state and that all REST
         endpoints behave as expected via rest_scan().
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])

    time.sleep(2)
    status = admin.take_db_offline("db")
    log_info("offline request response status: {}".format(status))
    time.sleep(10)

    # Fix: close/join the pool so the worker thread is not leaked when an
    # assertion below fails (original never released the pool).
    pool = ThreadPool(processes=1)
    try:
        db_info = admin.get_db_info("db")
        assert db_info["state"] == "Offline"

        # bring_db_online is asked to delay 15s; get() waits up to 15s for the
        # request itself to be issued.
        async_result = pool.apply_async(admin.bring_db_online, ("db", 15,))
        status = async_result.get(timeout=15)
        log_info("offline request response status: {}".format(status))
    finally:
        pool.close()
        pool.join()

    time.sleep(20)

    db_info = admin.get_db_info("db")
    assert db_info["state"] == "Online"

    # all db rest enpoints should succeed
    errors = rest_scan(cluster.sync_gateways[0], db="db", online=True,
                       num_docs=num_docs, user_name="seth", channels=["ABC"])
    assert len(errors) == 0
def test_bucket_online_offline_resync_with_offline(params_from_base_test_setup, sg_conf_name, num_users, num_docs, num_revisions):
    """Seed docs/revisions, take 'db' offline, run _resync while offline, then bring it back Online.

    Verifies:
      * every user sees num_users * num_docs docs at the expected revision
      * the db reaches 'Offline' after a restricted-access restart
      * _resync processes num_docs * num_users changes and returns 200
      * the db returns to 'Online' and user_x sees all docs via _changes
    """
    start = time.time()

    cluster_conf = params_from_base_test_setup["cluster_config"]
    test_mode = params_from_base_test_setup["mode"]

    if test_mode == "di":
        pytest.skip("Unsupported feature in distributed index")

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, test_mode)

    # Fix: log line previously named the wrong test ('..._with_online').
    log_info("Running 'test_bucket_online_offline_resync_with_offline'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using num_users: {}".format(num_users))
    log_info("Using num_docs: {}".format(num_docs))
    log_info("Using num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_conf)

    init_completed = time.time()
    log_info("Initialization completed. Time taken:{}s".format(init_completed - start))

    num_channels = 1
    channels = ["channel-" + str(i) for i in range(num_channels)]
    password = "******"

    sgs = cluster.sync_gateways
    admin = Admin(sgs[0])

    # Register User
    log_info("Register User")
    user_objects = admin.register_bulk_users(target=sgs[0], db="db", name_prefix="User",
                                             number=num_users, password=password, channels=channels)
    user_x = admin.register_user(target=sgs[0], db="db", name="User-X", password="******",
                                 channels=["channel_x"])

    # Add docs
    log_info("Add docs")
    in_parallel(user_objects, 'add_docs', num_docs)

    # Update docs
    log_info("Update docs")
    in_parallel(user_objects, 'update_docs', num_revisions)
    time.sleep(10)

    # Get changes for all users
    in_parallel(user_objects, 'get_changes')

    # every user should have same number of docs
    # total/expected docs = num_users * num_docs
    recieved_docs = in_parallel(user_objects, 'get_num_docs')
    expected_docs = num_users * num_docs
    for user_obj, docs in recieved_docs.items():
        log_info('User {} got {} docs, expected docs: {}'.format(user_obj.name, docs, expected_docs))
        assert docs == expected_docs

    # Verify that
    # user created doc-ids exist in docs received in changes feed
    # expected revision is equal to received revision
    expected_revision = str(num_revisions + 1)
    docs_rev_dict = in_parallel(user_objects, 'get_num_revisions')
    rev_errors = []
    for user_obj, docs_revision_dict in docs_rev_dict.items():
        for doc_id in docs_revision_dict.keys():
            rev = docs_revision_dict[doc_id]
            log_info('User {} doc_id {} has {} revisions, expected revision: {}'.format(
                user_obj.name, doc_id, rev, expected_revision
            ))
            if rev != expected_revision:
                rev_errors.append(doc_id)
                log_error('User {} doc_id {} got revision {}, expected revision {}'.format(
                    user_obj.name, doc_id, rev, expected_revision
                ))
    assert len(rev_errors) == 0

    # Verify each User created docs are part of changes feed
    # NOTE(review): this only asserts that at least one user saw its docs in
    # _changes; 'all(output.values())' may be the real intent -- confirm.
    output = in_parallel(user_objects, 'check_doc_ids_in_changes_feed')
    assert True in output.values()

    # Take "db" offline
    status = admin.take_db_offline(db="db")
    assert status == 200

    sg_restart_config = sync_gateway_config_path_for_mode("bucket_online_offline/db_online_offline_access_restricted", test_mode)
    restart_status = cluster.sync_gateways[0].restart(sg_restart_config)
    assert restart_status == 0

    log_info("Sleeping....")
    time.sleep(10)

    pool = ThreadPool(processes=1)
    try:
        log_info("Restarted SG....")
        time.sleep(5)

        db_info = admin.get_db_info("db")
        log_info("Status of db = {}".format(db_info["state"]))
        assert db_info["state"] == "Offline"

        # Fix: pre-initialize so a failed apply_async produces a clear
        # assertion below instead of a NameError at .get() time.
        async_resync_result = None
        try:
            async_resync_result = pool.apply_async(admin.db_resync, ("db",))
            log_info("resync issued !!!!!!")
        except Exception as e:
            log_info("Catch resync exception: {}".format(e))

        time.sleep(1)
        resync_occured = False
        # Poll for up to ~20s waiting to observe the transient 'Resyncing' state.
        for i in range(20):
            db_info = admin.get_db_info("db")
            log_info("Status of db = {}".format(db_info["state"]))
            if db_info["state"] == "Resyncing":
                resync_occured = True
                log_info("Resync occured")
                try:
                    status = admin.get_db_info(db="db")
                    log_info("Got db_info request status: {}".format(status))
                except HTTPError as e:
                    log_info("status = {} exception = {}".format(status, e.response.status_code))
                    assert False
                else:
                    log_info("Got 200 ok for supported operation")
            time.sleep(1)
            if resync_occured:
                break

        time.sleep(10)
        status = admin.bring_db_online(db="db")
        log_info("online request issued !!!!! response status: {}".format(status))

        time.sleep(5)
        db_info = admin.get_db_info("db")
        log_info("Status of db = {}".format(db_info["state"]))
        assert db_info["state"] == "Online"

        assert async_resync_result is not None, "db_resync request was never issued"
        resync_result = async_resync_result.get()
        log_info("resync_changes {}".format(resync_result))
        log_info("expecting num_changes == num_docs {} * num_users {}".format(num_docs, num_users))
        assert resync_result['payload']['changes'] == num_docs * num_users
        assert resync_result['status_code'] == 200
    finally:
        # Fix: release the worker thread even when an assertion above fails
        # (original leaked the pool).
        pool.close()
        pool.join()

    time.sleep(5)
    global_cache = list()
    for user in user_objects:
        global_cache.append(user.cache)

    all_docs = {k: v for user_cache in global_cache for k, v in user_cache.items()}

    verify_changes(user_x, expected_num_docs=expected_docs, expected_num_revisions=num_revisions, expected_docs=all_docs)

    end = time.time()
    log_info("Test ended.")
    log_info("Main test duration: {}".format(end - init_completed))
    log_info("Test setup time: {}".format(init_completed - start))
    log_info("Total Time taken: {}s".format(end - start))
def rest_scan(sync_gateway, db, online, num_docs, user_name, channels):
    """Exercise a broad set of admin and public REST endpoints against `db`.

    Creates a role and a user, writes `num_docs` docs three different ways
    (individual PUT, _bulk_docs, POST), updates them, writes/reads a local
    doc, then reads everything back via _all_docs, _bulk_get and _changes.
    Any HTTPError raised by an endpoint is collected rather than re-raised,
    so callers can assert on the failure set (empty when online, non-empty
    when offline).

    :param sync_gateway: sync_gateway instance whose endpoints are scanned
    :param db: name of the database to target
    :param online: expected db state; when False a bare User object is built
        locally because user registration itself is expected to fail
    :param num_docs: number of docs to add per write style (3 styles total)
    :param user_name: name of the user to register / act as
    :param channels: channels granted to the user
    :return: list of (url, status_code) tuples, one per failed request
    """
    # Missing ADMIN
    # TODO: GET /{db}/_session/{session-id}
    # TODO: POST /{db}/_session
    # TODO: DELETE /{db}/_session/{session-id}
    # TODO: DELETE /{db}/_user/{name}/_session/{session-id}
    # TODO: DELETE /{db}/_user/{name}/_session
    # TODO: DELETE /{db}/_user/{name}
    # TODO: POST /{db}/_role/
    # TODO: DELETE /{db}/_role/{name}

    # Missing REST
    # TODO: POST /{db}/_all_docs
    # TODO: DELETE /{db}/{doc}
    # TODO: PUT /{db}/{doc}/{attachment}
    # TODO: GET /{db}/{doc}/{attachment}

    # Missing Local Document
    # TODO: DELETE /{db}/{local-doc-id}

    # Missing Authentication
    # TODO: POST /{db}/_facebook_token

    admin = Admin(sync_gateway=sync_gateway)
    error_responses = list()

    # PUT /{db}/_role/{name}
    try:
        admin.create_role(db=db, name="radio_stations", channels=["HWOD", "KDWB"])
    except HTTPError as e:
        log_info((e.response.url, e.response.status_code))
        error_responses.append((e.response.url, e.response.status_code))

    # GET /{db}/_role
    try:
        roles = admin.get_roles(db=db)
        log_info(roles)
    except HTTPError as e:
        log_info((e.response.url, e.response.status_code))
        error_responses.append((e.response.url, e.response.status_code))

    # GET /{db}/_role/{name}
    try:
        role = admin.get_role(db=db, name="radio_stations")
        log_info(role)
    except HTTPError as e:
        log_info((e.response.url, e.response.status_code))
        error_responses.append((e.response.url, e.response.status_code))

    # PUT /{db}/_user/{name}
    # `user` is reused below for all doc operations when the db is online.
    try:
        user = admin.register_user(target=sync_gateway, db=db, name=user_name, password="******", channels=channels)
    except HTTPError as e:
        log_info((e.response.url, e.response.status_code))
        error_responses.append((e.response.url, e.response.status_code))

    # GET /{db}/_user
    try:
        users_info = admin.get_users_info(db=db)
        log_info(users_info)
    except HTTPError as e:
        log_info((e.response.url, e.response.status_code))
        error_responses.append((e.response.url, e.response.status_code))

    # GET /{db}/_user/{name}
    try:
        user_info = admin.get_user_info(db=db, name=user_name)
        log_info(user_info)
    except HTTPError as e:
        log_info((e.response.url, e.response.status_code))
        error_responses.append((e.response.url, e.response.status_code))

    # GET /{db} -- also sanity-check that the reported state matches `online`
    try:
        db_info = admin.get_db_info(db=db)
        if not online:
            assert db_info["state"] == "Offline"
        else:
            assert db_info["state"] == "Online"
        log_info(db_info)
    except HTTPError as e:
        log_info((e.response.url, e.response.status_code))
        error_responses.append((e.response.url, e.response.status_code))

    # Create dummy user to hit endpoint if offline, user creation above will fail
    if not online:
        user = User(target=sync_gateway, db=db, name=user_name, password="******", channels=channels)

    # PUT /{db}/{name}
    add_docs_errors = user.add_docs(num_docs=num_docs)
    error_responses.extend(add_docs_errors)

    # POST /{db}/_bulk_docs
    bulk_doc_errors = user.add_docs(num_docs=num_docs, bulk=True)
    error_responses.extend(bulk_doc_errors)

    # POST /{db}/
    for i in range(num_docs):
        try:
            user.add_doc()
        except HTTPError as e:
            log_info((e.response.url, e.response.status_code))
            error_responses.append((e.response.url, e.response.status_code))

    # GET /{db}/{name}
    # PUT /{db}/{name}
    if online:
        update_docs_errors = user.update_docs(num_revs_per_doc=1)
        error_responses.extend(update_docs_errors)
    else:
        try:
            # Try to hit the GET enpoint for "test-id"
            user.update_doc("test-id")
        except HTTPError as e:
            log_info((e.response.url, e.response.status_code))
            error_responses.append((e.response.url, e.response.status_code))

    # PUT /{db}/{local-doc-id}
    local_doc_id = uuid.uuid4()
    try:
        doc = user.add_doc("_local/{}".format(local_doc_id), content={"message": "I should not be replicated"})
    except HTTPError as e:
        log_info((e.response.url, e.response.status_code))
        error_responses.append((e.response.url, e.response.status_code))

    # GET /{db}/{local-doc-id}
    try:
        doc = user.get_doc("_local/{}".format(local_doc_id))
        assert doc["content"]["message"] == "I should not be replicated"
    except HTTPError as e:
        log_info((e.response.url, e.response.status_code))
        error_responses.append((e.response.url, e.response.status_code))

    # GET /{db}/_all_docs
    try:
        all_docs_result = user.get_all_docs()
        # num_docs /{db}/{doc} PUT + num_docs /{db}/_bulk_docs + num_docs POST /{db}/
        assert len(all_docs_result["rows"]) == num_docs * 3
    except HTTPError as e:
        log_info((e.response.url, e.response.status_code))
        error_responses.append((e.response.url, e.response.status_code))

    # POST /{db}/_bulk_get
    # NOTE: assumes at least 10 docs were cached by the writes above --
    # i.e. num_docs is expected to be >= 4 here.
    try:
        doc_ids = list(user.cache.keys())
        first_ten_ids = doc_ids[:10]
        first_ten = user.get_docs(first_ten_ids)
        assert len(first_ten) == 10
    except HTTPError as e:
        log_info((e.response.url, e.response.status_code))
        error_responses.append((e.response.url, e.response.status_code))

    # wait for changes
    time.sleep(2)

    # GET /{db}/_changes
    try:
        user.get_changes()
        # If successful, verify the _changes feed
        verify_changes(user, expected_num_docs=num_docs * 3, expected_num_revisions=1, expected_docs=user.cache)
    except HTTPError as e:
        log_info((e.response.url, e.response.status_code))
        error_responses.append((e.response.url, e.response.status_code))

    return error_responses
def test_db_online_offline_webhooks_offline(params_from_base_test_setup, sg_conf_name, num_users, num_channels, num_docs, num_revisions):
    """Take 'db' offline then back online and verify the webhook receiver sees both state events."""
    start = time.time()

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    if mode == "di":
        pytest.skip("Offline tests not supported in Di mode -- see https://github.com/couchbase/sync_gateway/issues/2423#issuecomment-300841425")

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'test_db_online_offline_webhooks_offline'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using num_users: {}".format(num_users))
    log_info("Using num_channels: {}".format(num_channels))
    log_info("Using num_docs: {}".format(num_docs))
    log_info("Using num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_conf)
    init_completed = time.time()
    log_info("Initialization completed. Time taken:{}s".format(init_completed - start))

    channels = ["channel-" + str(i) for i in range(num_channels)]
    password = "******"

    webhook_server = WebServer()
    webhook_server.start()

    gateways = cluster.sync_gateways
    admin = Admin(gateways[0])

    # Seed users, docs and revisions so the webhook receives traffic.
    log_info("Register User")
    seeded_users = admin.register_bulk_users(target=gateways[0], db="db", name_prefix="User",
                                             number=num_users, password=password, channels=channels)

    log_info("Add docs")
    in_parallel(seeded_users, 'add_docs', num_docs)

    log_info("Update docs")
    in_parallel(seeded_users, 'update_docs', num_revisions)
    time.sleep(10)

    # Take db offline through the REST client helper.
    sg_client = MobileRestClient()
    assert sg_client.take_db_offline(cluster_conf=cluster_conf, db="db") == 0

    time.sleep(5)
    db_info = admin.get_db_info("db")
    log_info("Expecting db state {} found db state {}".format("Offline", db_info['state']))
    assert db_info["state"] == "Offline"

    events = webhook_server.get_data()
    time.sleep(5)
    log_info("webhook event {}".format(events))

    try:
        assert events[-1]['state'] == 'offline'

        # Bring db online
        assert sg_client.bring_db_online(cluster_conf=cluster_conf, db="db") == 0

        time.sleep(5)
        db_info = admin.get_db_info("db")
        log_info("Expecting db state {} found db state {}".format("Online", db_info['state']))
        assert db_info["state"] == "Online"

        time.sleep(5)
        events = webhook_server.get_data()
        assert events[-1]['state'] == 'online'
        time.sleep(10)
        log_info("webhook event {}".format(events))
    except IndexError:
        log_info("Received index error")
        raise
    finally:
        webhook_server.stop()
def test_db_online_offline_webhooks_offline(params_from_base_test_setup, sg_conf_name, num_users, num_channels, num_docs, num_revisions):
    """Take 'db' offline/online via the admin API and verify webhook 'offline'/'online' events.

    NOTE(review): this redefines test_db_online_offline_webhooks_offline from
    earlier in this module; pytest only collects this later definition, so the
    earlier variant silently never runs. One of the two should be renamed.
    """
    start = time.time()

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'test_db_online_offline_webhooks_offline'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using num_users: {}".format(num_users))
    log_info("Using num_channels: {}".format(num_channels))
    log_info("Using num_docs: {}".format(num_docs))
    log_info("Using num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_conf)

    init_completed = time.time()
    log_info("Initialization completed. Time taken:{}s".format(init_completed - start))

    channels = ["channel-" + str(i) for i in range(num_channels)]
    password = "******"

    ws = WebServer()
    ws.start()
    try:
        sgs = cluster.sync_gateways
        admin = Admin(sgs[0])

        # Register User
        log_info("Register User")
        user_objects = admin.register_bulk_users(target=sgs[0], db="db", name_prefix="User",
                                                 number=num_users, password=password, channels=channels)

        # Add docs
        log_info("Add docs")
        in_parallel(user_objects, 'add_docs', num_docs)

        # Update docs
        log_info("Update docs")
        in_parallel(user_objects, 'update_docs', num_revisions)
        time.sleep(10)

        admin.take_db_offline("db")
        time.sleep(5)
        db_info = admin.get_db_info("db")
        log_info("Expecting db state {} found db state {}".format("Offline", db_info['state']))
        assert db_info["state"] == "Offline"

        webhook_events = ws.get_data()
        time.sleep(5)
        log_info("webhook event {}".format(webhook_events))
        last_event = webhook_events[-1]
        assert last_event['state'] == 'offline'

        admin.bring_db_online("db")
        time.sleep(5)
        db_info = admin.get_db_info("db")
        log_info("Expecting db state {} found db state {}".format("Online", db_info['state']))
        assert db_info["state"] == "Online"

        webhook_events = ws.get_data()
        last_event = webhook_events[-1]
        assert last_event['state'] == 'online'
        time.sleep(10)
        log_info("webhook event {}".format(webhook_events))
    finally:
        # Fix: stop the webhook listener even when an assertion fails so the
        # server thread/port are not leaked into subsequent tests (the sibling
        # webhooks test already guards cleanup this way).
        ws.stop()
def test_bucket_online_offline_resync_with_offline(params_from_base_test_setup, sg_conf_name, num_users, num_docs, num_revisions):
    """Resync-while-offline test using the MobileRestClient helper for offline/online transitions.

    NOTE(review): this redefines test_bucket_online_offline_resync_with_offline
    from earlier in this module; pytest only collects this later definition, so
    the earlier variant silently never runs. One of the two should be renamed.

    Verifies:
      * every user sees num_users * num_docs docs at the expected revision
      * the db reaches 'Offline' after a restricted-access restart
      * _resync processes num_docs * num_users changes and returns 200
      * the db returns to 'Online' and user_x sees all docs via _changes
    """
    start = time.time()

    cluster_conf = params_from_base_test_setup["cluster_config"]
    test_mode = params_from_base_test_setup["mode"]

    if test_mode == "di":
        pytest.skip("Unsupported feature in distributed index")

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, test_mode)

    # Fix: log line previously named the wrong test ('..._with_online').
    log_info("Running 'test_bucket_online_offline_resync_with_offline'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using num_users: {}".format(num_users))
    log_info("Using num_docs: {}".format(num_docs))
    log_info("Using num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_conf)

    init_completed = time.time()
    log_info("Initialization completed. Time taken:{}s".format(init_completed - start))

    num_channels = 1
    channels = ["channel-" + str(i) for i in range(num_channels)]
    password = "******"

    sgs = cluster.sync_gateways
    admin = Admin(sgs[0])

    # Register User
    log_info("Register User")
    user_objects = admin.register_bulk_users(target=sgs[0], db="db", name_prefix="User",
                                             number=num_users, password=password, channels=channels)
    user_x = admin.register_user(target=sgs[0], db="db", name="User-X", password="******",
                                 channels=["channel_x"])

    # Add docs
    log_info("Add docs")
    in_parallel(user_objects, 'add_docs', num_docs)

    # Update docs
    log_info("Update docs")
    in_parallel(user_objects, 'update_docs', num_revisions)
    time.sleep(10)

    # Get changes for all users
    in_parallel(user_objects, 'get_changes')

    # every user should have same number of docs
    # total/expected docs = num_users * num_docs
    recieved_docs = in_parallel(user_objects, 'get_num_docs')
    expected_docs = num_users * num_docs
    for user_obj, docs in recieved_docs.items():
        log_info('User {} got {} docs, expected docs: {}'.format(
            user_obj.name, docs, expected_docs))
        assert docs == expected_docs

    # Verify that
    # user created doc-ids exist in docs received in changes feed
    # expected revision is equal to received revision
    expected_revision = str(num_revisions + 1)
    docs_rev_dict = in_parallel(user_objects, 'get_num_revisions')
    rev_errors = []
    for user_obj, docs_revision_dict in docs_rev_dict.items():
        for doc_id in docs_revision_dict.keys():
            rev = docs_revision_dict[doc_id]
            log_info(
                'User {} doc_id {} has {} revisions, expected revision: {}'.
                format(user_obj.name, doc_id, rev, expected_revision))
            if rev != expected_revision:
                rev_errors.append(doc_id)
                log_error(
                    'User {} doc_id {} got revision {}, expected revision {}'.
                    format(user_obj.name, doc_id, rev, expected_revision))
    assert len(rev_errors) == 0

    # Verify each User created docs are part of changes feed
    # NOTE(review): this only asserts that at least one user saw its docs in
    # _changes; 'all(output.values())' may be the real intent -- confirm.
    output = in_parallel(user_objects, 'check_doc_ids_in_changes_feed')
    assert True in output.values()

    # Take "db" offline
    sg_client = MobileRestClient()
    status = sg_client.take_db_offline(cluster_conf=cluster_conf, db="db")
    assert status == 0

    sg_restart_config = sync_gateway_config_path_for_mode(
        "bucket_online_offline/db_online_offline_access_restricted", test_mode)
    restart_status = cluster.sync_gateways[0].restart(sg_restart_config)
    assert restart_status == 0

    log_info("Sleeping....")
    time.sleep(10)

    pool = ThreadPool(processes=1)
    try:
        log_info("Restarted SG....")
        time.sleep(5)

        db_info = admin.get_db_info("db")
        log_info("Status of db = {}".format(db_info["state"]))
        assert db_info["state"] == "Offline"

        # Fix: pre-initialize so a failed apply_async produces a clear
        # assertion below instead of a NameError at .get() time.
        async_resync_result = None
        try:
            async_resync_result = pool.apply_async(admin.db_resync, ("db", ))
            log_info("resync issued !!!!!!")
        except Exception as e:
            log_info("Catch resync exception: {}".format(e))

        time.sleep(1)
        resync_occured = False
        # Poll for up to ~20s waiting to observe the transient 'Resyncing' state.
        for i in range(20):
            db_info = admin.get_db_info("db")
            log_info("Status of db = {}".format(db_info["state"]))
            if db_info["state"] == "Resyncing":
                resync_occured = True
                log_info("Resync occured")
                try:
                    status = admin.get_db_info(db="db")
                    log_info("Got db_info request status: {}".format(status))
                except HTTPError as e:
                    log_info("status = {} exception = {}".format(
                        status, e.response.status_code))
                    assert False
                else:
                    log_info("Got 200 ok for supported operation")
            time.sleep(1)
            if resync_occured:
                break

        time.sleep(10)
        status = sg_client.bring_db_online(cluster_conf=cluster_conf, db="db")
        log_info("online request issued !!!!! response status: {}".format(status))

        time.sleep(5)
        db_info = admin.get_db_info("db")
        log_info("Status of db = {}".format(db_info["state"]))
        assert db_info["state"] == "Online"

        assert async_resync_result is not None, "db_resync request was never issued"
        resync_result = async_resync_result.get()
        log_info("resync_changes {}".format(resync_result))
        log_info("expecting num_changes == num_docs {} * num_users {}".format(
            num_docs, num_users))
        assert resync_result['payload']['changes'] == num_docs * num_users
        assert resync_result['status_code'] == 200
    finally:
        # Fix: release the worker thread even when an assertion above fails
        # (original leaked the pool).
        pool.close()
        pool.join()

    time.sleep(5)
    global_cache = list()
    for user in user_objects:
        global_cache.append(user.cache)

    all_docs = {
        k: v
        for user_cache in global_cache for k, v in user_cache.items()
    }

    verify_changes(user_x, expected_num_docs=expected_docs, expected_num_revisions=num_revisions, expected_docs=all_docs)

    end = time.time()
    log_info("Test ended.")
    log_info("Main test duration: {}".format(end - init_completed))
    log_info("Test setup time: {}".format(init_completed - start))
    log_info("Total Time taken: {}s".format(end - start))