def install_couchbase_server(cluster_config, couchbase_server_config):
    """Install Couchbase Server on the cluster and wait for it to be healthy.

    Runs the 'install-couchbase-server-package.yml' ansible playbook against
    the hosts described by cluster_config, then polls the first server in the
    cluster topology until it reports a 'healthy' state.

    Args:
        cluster_config: path/identifier of the cluster config to provision
        couchbase_server_config: provides the package base url / package name
            via get_baseurl_package()

    Raises:
        ProvisioningError: if the ansible playbook exits non-zero.
    """
    log_info(cluster_config)
    log_info(couchbase_server_config)

    ansible_runner = AnsibleRunner(cluster_config)

    log_info(">>> Installing Couchbase Server")

    # Install Server
    server_baseurl, server_package_name = couchbase_server_config.get_baseurl_package()
    status = ansible_runner.run_ansible_playbook(
        "install-couchbase-server-package.yml",
        extra_vars={
            "couchbase_server_package_base_url": server_baseurl,
            "couchbase_server_package_name": server_package_name
        }
    )
    if status != 0:
        raise ProvisioningError("Failed to install Couchbase Server")

    # Wait for server to be in 'healthy' state.
    # Fixed: was a bare print(); use log_info for consistency with the
    # rest of this module's logging.
    log_info(">>> Waiting for server to be in 'healthy' state")
    cluster_keywords = ClusterKeywords()
    cluster_topology = cluster_keywords.get_cluster_topology(cluster_config)
    server_url = cluster_topology["couchbase_servers"][0]
    cb_server = CouchbaseServer(server_url)
    cb_server.wait_for_ready_state()
def test_writing_attachment_to_couchbase_server(params_from_base_test_setup, sg_conf_name):
    """Push a doc with an attachment and check the attachment doc on the server.

    1. Reset the cluster with the given sync_gateway config
    2. Add 100 plain docs, then one doc carrying an attachment.
       Attachment docs are stored under ids like
       _sync:att:sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0= (suffix is the digest)
    3. Scan the server bucket for docs with the '_sync:att:' prefix
    4. Assert exactly one attachment doc is present

    NOTE(review): the original docstring described a sync function rejecting
    all writes and asserted the attachment doc does NOT exist, but the code
    asserts exactly one attachment doc exists — confirm intended behavior.
    """
    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    helper = ClusterKeywords()
    helper.reset_cluster(cluster_config, sg_conf)

    topo = helper.get_cluster_topology(cluster_config)

    cbs_url = topo["couchbase_servers"][0]
    sg_url = topo["sync_gateways"][0]["public"]
    sg_url_admin = topo["sync_gateways"][0]["admin"]
    sg_db = "db"
    bucket = "data-bucket"

    log_info("Running 'test_writing_attachment_to_couchbase_server'")
    log_info("Using cbs_url: {}".format(cbs_url))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_url_admin: {}".format(sg_url_admin))
    log_info("Using sg_db: {}".format(sg_db))
    log_info("Using bucket: {}".format(bucket))

    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC"]

    rest_client = MobileRestClient()

    rest_client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name,
                            password=sg_user_password, channels=sg_user_channels)
    session = rest_client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)

    bulk_docs = rest_client.add_docs(url=sg_url, db=sg_db, number=100,
                                     id_prefix=sg_db, channels=sg_user_channels,
                                     auth=session)
    assert len(bulk_docs) == 100

    # Create doc with attachment and push to sync_gateway
    attachment_doc = document.create_doc(doc_id="att_doc",
                                         content={"sample_key": "sample_val"},
                                         attachment_name="sample_text.txt",
                                         channels=sg_user_channels)
    rest_client.add_doc(url=sg_url, db=sg_db, doc=attachment_doc, auth=session)

    cbs = CouchbaseServer(cbs_url)

    # Assert that the attachment doc gets written to couchbase server
    att_docs = cbs.get_server_docs_with_prefix(bucket=bucket, prefix="_sync:att:")
    assert len(att_docs) == 1
def create_server_buckets(cluster_config, sync_gateway_config):
    """Recreate the server buckets required by the sync_gateway config.

    Deletes every existing bucket on the cluster's couchbase server, then
    creates one bucket per name found in the sync_gateway config. Skips
    everything when the topology contains no couchbase server (e.g.
    resources/cluster_configs/1sg).
    """
    # get the couchbase server url
    topology = ClusterKeywords().get_cluster_topology(cluster_config)

    # Handle the case of resources/cluster_configs/1sg, where we are targeting a
    # sync_gateway without a backing server
    if not topology["couchbase_servers"]:
        log_info("The cluster_config: {} does not have a couchbase server. Skipping bucket creation!!".format(cluster_config))
        return

    server = CouchbaseServer(topology["couchbase_servers"][0])

    # delete existing buckets
    server.delete_buckets()

    # find bucket names from sg config
    names = get_buckets_from_sync_gateway_config(sync_gateway_config.config_path)

    # create couchbase server buckets
    server.create_buckets(names)
def test_server_goes_down_sanity(params_from_base_test_setup):
    """
    1. Start with a two node couchbase server cluster
    2. Starting adding docs
    3. Kill one of the server nodes and signal completion
    4. Stop adding docs
    5. Verify that that the expected docs are present and in the changes feed.
    6. Start server again and add to cluster
    """
    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    cluster_helper = ClusterKeywords()

    sg_conf_name = "sync_gateway_default_functional_tests"
    sg_conf_path = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper.reset_cluster(cluster_config=cluster_config,
                                 sync_gateway_config=sg_conf_path)

    topology = cluster_helper.get_cluster_topology(cluster_config)

    admin_sg = topology["sync_gateways"][0]["admin"]
    sg_url = topology["sync_gateways"][0]["public"]
    # Fixed local typo: was 'coucbase_servers'
    couchbase_servers = topology["couchbase_servers"]

    cbs_one_url = couchbase_servers[0]
    cbs_two_url = couchbase_servers[1]

    log_info("Running: 'test_server_goes_down_sanity'")
    log_info("cluster_config: {}".format(cluster_config))
    log_info("admin_sg: {}".format(admin_sg))
    log_info("sg_url: {}".format(sg_url))
    log_info("cbs_one_url: {}".format(cbs_one_url))
    log_info("cbs_two_url: {}".format(cbs_two_url))

    sg_db = "db"
    num_docs = 100

    sg_user_name = "seth"
    sg_user_password = "******"
    channels = ["ABC", "CBS"]

    client = MobileRestClient()

    main_server = CouchbaseServer(cbs_one_url)
    flakey_server = CouchbaseServer(cbs_two_url)

    client.create_user(admin_sg, sg_db, sg_user_name, sg_user_password, channels=channels)
    session = client.create_session(admin_sg, sg_db, sg_user_name)

    # Stop second server
    flakey_server.stop()

    # Try to add docs in a loop until all succeed; if they never do, fail with a timeout.
    # Wait 30 seconds for auto failover
    # (Minimum value suggested - http://docs.couchbase.com/admin/admin/Tasks/tasks-nodeFailover.html)
    # + 15 seconds to add docs
    timeout = 45
    start = time.time()

    successful_add = False
    retries = 0
    while not successful_add:

        # Fail tests if all docs do not succeed before timeout
        if (time.time() - start) > timeout:
            # Restore topology before failing the test.
            # Fixed: also recover the failed-over node (matches the cleanup
            # on every other restore path in this test).
            flakey_server.start()
            main_server.recover(flakey_server)
            main_server.rebalance_in(couchbase_servers, flakey_server)
            # Fixed: raise the module's TimeoutException (a bare TimeoutError
            # is not defined in Python 2 and is inconsistent with the except
            # clause below).
            raise keywords.exceptions.TimeoutException("Failed to successfully put docs before timeout")

        try:
            docs = client.add_docs(url=sg_url, db=sg_db, number=num_docs, id_prefix=None, auth=session, channels=channels)

            # If the above add doc does not throw, it was a successfull add.
            successful_add = True
        except requests.exceptions.HTTPError as he:
            # Fixed: previously logged a dead 'errors' counter that always
            # printed num_docs; log the actual retry count instead.
            retries += 1
            log_info("Failed to add docs: {}".format(he))
            log_info("Retry attempt: {}".format(retries))

        time.sleep(1)

    # Fixed: compare against num_docs instead of a hardcoded 100
    assert len(docs) == num_docs

    client.verify_docs_present(url=sg_url, db=sg_db, expected_docs=docs, auth=session)

    try:
        client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=docs, auth=session, polling_interval=5)
    except keywords.exceptions.TimeoutException:
        # timeout verifying docs. Bring server back in to restore topology, then fail
        # Failing due to https://github.com/couchbase/sync_gateway/issues/2197
        flakey_server.start()
        main_server.recover(flakey_server)
        main_server.rebalance_in(couchbase_servers, flakey_server)
        raise keywords.exceptions.TimeoutException("Failed to get all changes")

    # Test succeeded without timeout, bring server back into topology
    flakey_server.start()
    main_server.recover(flakey_server)
    main_server.rebalance_in(couchbase_servers, flakey_server)

    log_info("test_server_goes_down_sanity complete!")
def test_rebalance_sanity(params_from_base_test_setup):
    """Rebalance a server node out while docs are being updated; verify no loss.

    1. Reset the cluster with the default functional test config
    2. Add docs through sync_gateway
    3. Start updating the docs on a background thread
    4. Rebalance one couchbase server node out during the updates
    5. Verify all updated docs / revisions are present and in the changes feed
    6. Rebalance the removed server node back into the cluster
    """
    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    cluster_helper = ClusterKeywords()

    sg_conf_name = "sync_gateway_default_functional_tests"
    sg_conf_path = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper.reset_cluster(cluster_config=cluster_config,
                                 sync_gateway_config=sg_conf_path)

    topology = cluster_helper.get_cluster_topology(cluster_config)

    admin_sg_one = topology["sync_gateways"][0]["admin"]
    sg_one_url = topology["sync_gateways"][0]["public"]

    cluster_servers = topology["couchbase_servers"]
    cbs_one_url = cluster_servers[0]
    cbs_two_url = cluster_servers[1]

    # Fixed: log message previously named 'test_distributed_index_rebalance_sanity',
    # which does not match this test function.
    log_info("Running: 'test_rebalance_sanity'")
    log_info("cluster_config: {}".format(cluster_config))
    log_info("admin_sg: {}".format(admin_sg_one))
    log_info("sg_url: {}".format(sg_one_url))
    log_info("cbs_one_url: {}".format(cbs_one_url))
    log_info("cbs_two_url: {}".format(cbs_two_url))

    sg_db = "db"
    num_docs = 100
    num_updates = 100
    sg_user_name = "seth"
    sg_user_password = "******"
    channels = ["ABC", "CBS"]

    client = MobileRestClient()

    cb_server = CouchbaseServer(cbs_one_url)
    server_to_remove = CouchbaseServer(cbs_two_url)

    client.create_user(admin_sg_one, sg_db, sg_user_name, sg_user_password, channels=channels)
    session = client.create_session(admin_sg_one, sg_db, sg_user_name)

    with concurrent.futures.ThreadPoolExecutor(5) as executor:

        # Add docs to sg
        log_info("Adding docs to sync_gateway")
        docs = client.add_docs(sg_one_url, sg_db, num_docs, "test_doc", channels=channels, auth=session)
        assert len(docs) == num_docs

        # Start updating docs and rebalance out one CBS node
        log_info("Updating docs on sync_gateway")
        update_docs_task = executor.submit(client.update_docs, sg_one_url, sg_db, docs, num_updates, auth=session)

        # Run rebalance while the updates are in flight on the background thread
        cb_server.rebalance_out(cluster_servers, server_to_remove)

        updated_docs = update_docs_task.result()
        log_info(updated_docs)

    # Verify docs / revisions present
    client.verify_docs_present(sg_one_url, sg_db, updated_docs, auth=session)

    # Verify docs revisions in changes feed
    client.verify_docs_in_changes(sg_one_url, sg_db, updated_docs, auth=session)

    # Rebalance Server back in to the pool
    cb_server.add_node(server_to_remove)
    cb_server.rebalance_in(cluster_servers, server_to_remove)
def test_server_goes_down_rebuild_channels(params_from_base_test_setup):
    """
    1. Start with a two node couchbase server cluster
    2. Push docs as an 'admin' user; verify 'seth' sees them in changes
    3. Stop one of the server nodes
    4. Poll the changes feed until it succeeds again after auto failover
    5. Verify the rebuilt changes feed has no new and no missing changes
    6. Start the downed server and rebalance it back into the cluster
    """
    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    cluster_helper = ClusterKeywords()

    sg_conf_name = "sync_gateway_default_functional_tests"
    sg_conf_path = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper.reset_cluster(cluster_config=cluster_config,
                                 sync_gateway_config=sg_conf_path)

    topology = cluster_helper.get_cluster_topology(cluster_config)

    admin_sg = topology["sync_gateways"][0]["admin"]
    sg_url = topology["sync_gateways"][0]["public"]
    # Fixed local typo: was 'coucbase_servers'
    couchbase_servers = topology["couchbase_servers"]

    cbs_one_url = couchbase_servers[0]
    cbs_two_url = couchbase_servers[1]

    # Fixed: log message previously said 'test_server_goes_down_sanity',
    # which is a different test in this module.
    log_info("Running: 'test_server_goes_down_rebuild_channels'")
    log_info("cluster_config: {}".format(cluster_config))
    log_info("admin_sg: {}".format(admin_sg))
    log_info("sg_url: {}".format(sg_url))
    log_info("cbs_one_url: {}".format(cbs_one_url))
    log_info("cbs_two_url: {}".format(cbs_two_url))

    sg_db = "db"
    num_docs = 100

    admin_user_info = userinfo.UserInfo(
        name="admin",
        password="******",
        channels=["ABC"],
        roles=[]
    )

    seth_user_info = userinfo.UserInfo(
        name="seth",
        password="******",
        channels=["ABC"],
        roles=[]
    )

    client = MobileRestClient()

    main_server = CouchbaseServer(cbs_one_url)
    flakey_server = CouchbaseServer(cbs_two_url)

    admin_auth = client.create_user(
        admin_sg, sg_db,
        admin_user_info.name, admin_user_info.password, channels=admin_user_info.channels
    )

    client.create_user(
        admin_sg, sg_db,
        seth_user_info.name, seth_user_info.password, channels=seth_user_info.channels
    )
    seth_session = client.create_session(admin_sg, sg_db, seth_user_info.name)

    # allow any user docs to make it to changes
    initial_changes = client.get_changes(url=sg_url, db=sg_db, since=0, auth=seth_session)

    # push docs from admin
    docs = client.add_docs(
        url=sg_url, db=sg_db,
        number=num_docs,
        id_prefix=None,
        channels=admin_user_info.channels,
        auth=admin_auth
    )
    assert len(docs) == num_docs

    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=docs, auth=seth_session)
    changes_before_failover = client.get_changes(url=sg_url, db=sg_db, since=initial_changes["last_seq"], auth=seth_session)
    assert len(changes_before_failover["results"]) == num_docs

    # Stop server via 'service stop'
    flakey_server.stop()

    start = time.time()
    while True:

        # Fail tests if all docs do not succeed before timeout
        if (time.time() - start) > 60:
            # Bring server back up before failing the test
            flakey_server.start()
            main_server.recover(flakey_server)
            main_server.rebalance_in(couchbase_servers, flakey_server)
            # Fixed: raise TimeoutException — the except clauses in this module
            # catch keywords.exceptions.TimeoutException; 'TimeoutError' is not
            # an attribute of that exceptions module.
            raise keywords.exceptions.TimeoutException("Failed to rebuild changes")

        try:
            # Poll until failover happens (~30 second)
            client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=docs, auth=seth_session)
            # changes requests succeeded, exit loop
            break
        except requests.exceptions.HTTPError:
            # Changes will fail until failover of the down server happens. Wait and try again.
            log_info("/db/_changes failed due to server down. Retrying ...")
            time.sleep(1)

    # Verify no new changes
    changes = client.get_changes(
        url=sg_url, db=sg_db,
        since=changes_before_failover["last_seq"],
        auth=seth_session,
        feed="normal"
    )
    assert len(changes["results"]) == 0

    # Check that all changes are intact from initial changes request
    changes = client.get_changes(url=sg_url, db=sg_db, since=initial_changes["last_seq"], auth=seth_session)
    assert len(changes["results"]) == num_docs

    # Test succeeded without timeout, bring server back into topology.
    # (Removed a redundant re-read of topology["couchbase_servers"] here —
    # the list was already captured above.)
    flakey_server.start()
    main_server.recover(flakey_server)
    main_server.rebalance_in(couchbase_servers, flakey_server)
def test_attachment_revpos_when_ancestor_unavailable(params_from_base_test_setup, sg_conf_name):
    """Validate uploading a conflicting revision whose parent body is gone.

    Creates a document with an attachment, then updates that document so that
    the body of the revision that originally pushed the document is no longer
    available. Adds a new revision that's not a child of the active revision,
    and validates that it's uploaded successfully.

    Example:
    1. Document is created with attachment at rev-1
    2. Document is updated (strip digests and length, only put revpos & stub)
       multiple times on the server, goes to rev-4
    3. Client attempts to add a new (conflicting) revision 2, with parent rev-1.
    4. If the body of rev-1 is no longer available on the server (temporary
       backup of revision has expired, and is no longer stored in the in-memory
       rev cache), we were throwing an error to client because we couldn't
       verify based on the _attachments property in rev-1.
    5. In this scenario, before returning error, we are now checking if the
       active revision has a common ancestor with the incoming revision.
       If so, we can validate any revpos values equal to or earlier than the
       common ancestor against the active revision
    """
    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    helper = ClusterKeywords()
    helper.reset_cluster(cluster_config, sg_conf)

    topo = helper.get_cluster_topology(cluster_config)

    cbs_url = topo["couchbase_servers"][0]
    sg_url = topo["sync_gateways"][0]["public"]
    sg_url_admin = topo["sync_gateways"][0]["admin"]
    sg_db = "db"
    bucket = "data-bucket"

    log_info("Running 'test_attachment_revpos_when_ancestor_unavailable'")
    log_info("Using cbs_url: {}".format(cbs_url))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_url_admin: {}".format(sg_url_admin))
    log_info("Using sg_db: {}".format(sg_db))
    log_info("Using bucket: {}".format(bucket))

    channels_list = ["ABC"]

    rest_client = MobileRestClient()
    sg_util = SyncGateway()
    cb_server = CouchbaseServer(cbs_url)

    user1 = rest_client.create_user(url=sg_url_admin, db=sg_db, name="user1",
                                    password="******", channels=channels_list)

    # Push the gen-1 doc (with attachment), then churn revisions past it
    gen_1_doc = document.create_doc(doc_id="att_doc",
                                    content={"sample_key": "sample_val"},
                                    attachment_name="sample_text.txt",
                                    channels=channels_list)
    doc_gen_1 = rest_client.add_doc(url=sg_url, db=sg_db, doc=gen_1_doc, auth=user1)
    rest_client.update_doc(url=sg_url, db=sg_db, doc_id=doc_gen_1["id"],
                           number_updates=10, auth=user1)

    # Clear cached rev doc bodies from server and cycle sync_gateway
    sg_util.stop_sync_gateway(cluster_config=cluster_config, url=sg_url)
    cb_server.delete_couchbase_server_cached_rev_bodies(bucket=bucket)
    sg_util.start_sync_gateway(cluster_config=cluster_config, url=sg_url, config=sg_conf)

    # Conflicting rev-2 whose parent (rev-1) body is no longer on the server
    rest_client.add_conflict(
        url=sg_url,
        db=sg_db,
        doc_id=doc_gen_1["id"],
        parent_revisions=doc_gen_1["rev"],
        new_revision="2-foo",
        auth=user1
    )