def setup_client_syncgateway_test(request, setup_client_syncgateway_suite):
    """Test setup fixture for client sync_gateway tests.

    Starts the suite-scoped LiteServ instance with a per-test log file,
    resolves the sync_gateway public/admin URLs from the cluster topology,
    stops the sync_gateway, and yields connection info to the test.

    On teardown: deletes LiteServ databases, stops LiteServ and the
    sync_gateway, and fetches cluster logs if the test failed.

    Yields:
        dict with keys "cluster_config", "ls_url", "sg_url", "sg_admin_url".
    """
    log_info("Setting up client sync_gateway test ...")

    liteserv = setup_client_syncgateway_suite
    test_name = request.node.name

    # Hoisted: the same env value was previously re-read on every use.
    cluster_config = os.environ["CLUSTER_CONFIG"]

    ls_url = liteserv.start(
        "{}/logs/{}-{}-{}.txt".format(
            RESULTS_DIR, type(liteserv).__name__, test_name, datetime.datetime.now()
        )
    )

    cluster_helper = ClusterKeywords()
    sg_helper = SyncGateway()

    cluster_hosts = cluster_helper.get_cluster_topology(cluster_config)
    sg_url = cluster_hosts["sync_gateways"][0]["public"]
    sg_admin_url = cluster_hosts["sync_gateways"][0]["admin"]

    sg_helper.stop_sync_gateway(cluster_config=cluster_config, url=sg_url)

    # Yield values to test case via fixture argument
    yield {
        "cluster_config": cluster_config,
        "ls_url": ls_url,
        "sg_url": sg_url,
        "sg_admin_url": sg_admin_url,
    }

    log_info("Tearing down test")

    # Teardown test
    client = MobileRestClient()
    client.delete_databases(ls_url)
    liteserv.stop()

    sg_helper.stop_sync_gateway(cluster_config=cluster_config, url=sg_url)

    # If the test failed, pull logs.
    # NOTE(review): request.node.rep_call is presumably attached by a pytest
    # hookwrapper in conftest; this raises AttributeError if that hook is
    # absent — confirm the hook exists before relying on this fixture.
    if request.node.rep_call.failed:
        logging_helper = Logging()
        logging_helper.fetch_and_analyze_logs(cluster_config=cluster_config, test_name=test_name)
def test_attachment_revpos_when_ancestor_unavailable(params_from_base_test_setup, sg_conf_name):
    """Verify a conflicting revision with an attachment stub uploads even
    when the body of the revision that originally pushed the attachment is
    no longer available on the server.

    Scenario:
    1. Create a document with an attachment at rev-1.
    2. Update the document on the server multiple times (attachment becomes
       a revpos + stub), advancing past rev-4.
    3. Clear the server's cached/backed-up rev bodies and restart
       sync_gateway so rev-1's body cannot be fetched.
    4. Push a conflicting rev-2 whose parent is rev-1. Instead of erroring
       because the _attachments property of rev-1 cannot be verified,
       sync_gateway should fall back to validating revpos values at or
       before the common ancestor against the active revision.
    """
    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    # Reset the cluster and pull the endpoint topology.
    cluster_kw = ClusterKeywords()
    cluster_kw.reset_cluster(cluster_config, sg_conf)
    topology = cluster_kw.get_cluster_topology(cluster_config)

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    bucket = "data-bucket"

    log_info("Running 'test_attachment_revpos_when_ancestor_unavailable'")
    log_info("Using cbs_url: {}".format(cbs_url))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_url_admin: {}".format(sg_url_admin))
    log_info("Using sg_db: {}".format(sg_db))
    log_info("Using bucket: {}".format(bucket))

    channels_list = ["ABC"]

    rest_client = MobileRestClient()
    gateway = SyncGateway()
    server = CouchbaseServer(cbs_url)

    user1 = rest_client.create_user(
        url=sg_url_admin,
        db=sg_db,
        name="user1",
        password="******",
        channels=channels_list,
    )

    # Seed a document carrying an attachment, then churn it so the
    # attachment is referenced only by revpos/stub in later revisions.
    doc_with_att = document.create_doc(
        doc_id="att_doc",
        content={"sample_key": "sample_val"},
        attachment_name="sample_text.txt",
        channels=channels_list,
    )
    doc_gen_1 = rest_client.add_doc(url=sg_url, db=sg_db, doc=doc_with_att, auth=user1)
    rest_client.update_doc(
        url=sg_url, db=sg_db, doc_id=doc_gen_1["id"], number_updates=10, auth=user1
    )

    # Clear cached rev doc bodys from server and cycle sync_gateway
    gateway.stop_sync_gateway(cluster_config=cluster_config, url=sg_url)
    server.delete_couchbase_server_cached_rev_bodies(bucket=bucket)
    gateway.start_sync_gateway(cluster_config=cluster_config, url=sg_url, config=sg_conf)

    # Pushing this conflict succeeds only via the common-ancestor
    # fallback; add_conflict raises on failure.
    rest_client.add_conflict(
        url=sg_url,
        db=sg_db,
        doc_id=doc_gen_1["id"],
        parent_revisions=doc_gen_1["rev"],
        new_revision="2-foo",
        auth=user1,
    )
def setup_client_2sgs_test(request):
    """Test setup fixture for client tests against two sync_gateways.

    Reads LiteServ options from the pytest command line, starts LiteServ
    with a per-test log, resolves both sync_gateway endpoints from the
    cluster topology, stops both gateways, and yields connection info.

    On teardown: deletes LiteServ databases, shuts LiteServ down, closes
    the LiteServ log file, verifies LiteServ is no longer running, stops
    both sync_gateways, and fetches cluster logs if the test failed.

    Yields:
        dict with keys "cluster_config", "ls_url", "sg_one_url",
        "sg_one_admin_url", "sg_two_url", "sg_two_admin_url".
    """
    log_info("Setting up client sync_gateway test ...")

    liteserv_platform = request.config.getoption("--liteserv-platform")
    liteserv_version = request.config.getoption("--liteserv-version")
    liteserv_host = request.config.getoption("--liteserv-host")
    liteserv_port = request.config.getoption("--liteserv-port")
    liteserv_storage_engine = request.config.getoption("--liteserv-storage-engine")

    ls = LiteServ()
    client = MobileRestClient()

    test_name = request.node.name

    # Hoisted: the same env value was previously re-read on every use.
    cluster_config = os.environ["CLUSTER_CONFIG"]

    # Verify LiteServ is not running
    ls.verify_liteserv_not_running(host=liteserv_host, port=liteserv_port)

    ls_cluster_target = None
    if liteserv_platform == "net-win":
        ls_cluster_target = "resources/cluster_configs/windows"

    print("Starting LiteServ ...")

    log_path = "{}/logs/{}-ls1-{}-{}.txt".format(
        RESULTS_DIR, datetime.datetime.now(), liteserv_platform, test_name
    )
    if liteserv_platform != "net-win":
        # logging is file (handle; closed explicitly during teardown below)
        ls_logging = open(log_path, "w")
    else:
        # logging is name (path string passed through to LiteServ)
        ls_logging = log_path

    ls_url, ls_handle = ls.start_liteserv(
        platform=liteserv_platform,
        version=liteserv_version,
        host=liteserv_host,
        port=liteserv_port,
        storage_engine=liteserv_storage_engine,
        logfile=ls_logging,
        cluster_config=ls_cluster_target,
    )

    cluster_helper = ClusterKeywords()
    sg_helper = SyncGateway()

    cluster_hosts = cluster_helper.get_cluster_topology(cluster_config)

    sg_one_url = cluster_hosts["sync_gateways"][0]["public"]
    sg_one_admin_url = cluster_hosts["sync_gateways"][0]["admin"]
    sg_two_url = cluster_hosts["sync_gateways"][1]["public"]
    sg_two_admin_url = cluster_hosts["sync_gateways"][1]["admin"]

    sg_helper.stop_sync_gateway(cluster_config=cluster_config, url=sg_one_url)
    sg_helper.stop_sync_gateway(cluster_config=cluster_config, url=sg_two_url)

    # Yield values to test case via fixture argument
    yield {
        "cluster_config": cluster_config,
        "ls_url": ls_url,
        "sg_one_url": sg_one_url,
        "sg_one_admin_url": sg_one_admin_url,
        "sg_two_url": sg_two_url,
        "sg_two_admin_url": sg_two_admin_url,
    }

    log_info("Tearing down test")

    # Teardown test
    client.delete_databases(ls_url)
    ls.shutdown_liteserv(host=liteserv_host,
                         platform=liteserv_platform,
                         version=liteserv_version,
                         storage_engine=liteserv_storage_engine,
                         process_handle=ls_handle,
                         logfile=ls_logging,
                         cluster_config=ls_cluster_target)

    # BUGFIX: close the log file handle opened above (non net-win path);
    # it was previously never closed. hasattr guards the net-win case
    # where ls_logging is a plain path string; close() on an
    # already-closed file is a no-op, so this is safe even if
    # shutdown_liteserv closed it.
    if hasattr(ls_logging, "close"):
        ls_logging.close()

    # Verify LiteServ is killed
    ls.verify_liteserv_not_running(host=liteserv_host, port=liteserv_port)

    sg_helper.stop_sync_gateway(cluster_config=cluster_config, url=sg_one_url)
    sg_helper.stop_sync_gateway(cluster_config=cluster_config, url=sg_two_url)

    # If the test failed, pull logs.
    # NOTE(review): request.node.rep_call is presumably attached by a pytest
    # hookwrapper in conftest; this raises AttributeError if that hook is
    # absent — confirm the hook exists before relying on this fixture.
    if request.node.rep_call.failed:
        logging_helper = Logging()
        logging_helper.fetch_and_analyze_logs(cluster_config=cluster_config, test_name=test_name)