def params_from_base_test_setup(request, params_from_base_suite_setup):
    # Code before the yield will execute before each test starts

    # pytest command line parameters
    collect_logs = request.config.getoption("--collect-logs")

    cluster_config = params_from_base_suite_setup["cluster_config"]
    mode = params_from_base_suite_setup["mode"]

    test_name = request.node.name
    log_info("Setting up test '{}'".format(test_name))

    # This dictionary is passed to each test
    yield {"cluster_config": cluster_config, "mode": mode}

    # Code after the yield will execute when each test finishes
    log_info("Tearing down test '{}'".format(test_name))

    # Capture testkit socket usage
    network_utils = NetworkUtils()
    network_utils.list_connections()

    # Verify all sync_gateways and sg_accels are reachable
    c = Cluster(cluster_config)
    errors = c.verify_alive(mode)

    # If the test failed, pull logs
    if collect_logs or request.node.rep_call.failed or len(errors) != 0:
        logging_helper = Logging()
        logging_helper.fetch_and_analyze_logs(cluster_config=cluster_config, test_name=test_name)

    assert len(errors) == 0
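
# The teardown above reads request.node.rep_call.failed, which pytest does
# not provide out of the box: a pytest_runtest_makereport hook in conftest.py
# has to attach the phase reports to the test item. A minimal sketch of that
# standard pytest pattern follows; the repo's actual hook may differ.
import pytest

@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    # Let all other hooks run and grab the resulting report
    outcome = yield
    rep = outcome.get_result()

    # Attach the report for this phase to the item as rep_setup /
    # rep_call / rep_teardown so fixtures can check item.rep_call.failed
    setattr(item, "rep_" + rep.when, rep)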
def test_log_rotation_negative(params_from_base_test_setup, sg_conf_name):
    """Test log rotation with negative values for:
        "maxsize": -1,
        "maxage": -30,
        "maxbackups": -2
    SG shouldn't start
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup["xattrs_enabled"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf)
    sg_one_url = cluster_hosts["sync_gateways"][0]["public"]

    # Read sample sg_conf
    data = load_sync_gateway_config(sg_conf, mode, cluster_hosts["couchbase_servers"][0], xattrs_enabled, cluster_conf)

    # Set negative values for the rotation section
    data['logging']["default"]["rotation"] = {
        "maxsize": -1,
        "maxage": -30,
        "maxbackups": -2,
        "localtime": True
    }

    # Create a temp config file one directory above sg_conf
    temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json'

    with open(temp_conf, 'w') as fp:
        json.dump(data, fp)

    # Stop sync_gateways
    log_info(">>> Stopping sync_gateway")
    sg_helper = SyncGateway()
    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)

    try:
        sg_helper.start_sync_gateways(cluster_config=cluster_conf, url=sg_one_url, config=temp_conf)
    except ProvisioningError:
        # Expected failure: restore the known-good config
        sg_helper.start_sync_gateways(cluster_config=cluster_conf, url=sg_one_url, config=sg_conf)
        # Remove generated conf file
        os.remove(temp_conf)
        return

    # Remove generated conf file
    os.remove(temp_conf)
    pytest.fail("SG shouldn't be started!!!!")
def test_sync_sanity_backfill(params_from_base_test_setup, sg_conf_name):
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'sync_sanity_backfill'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    radio_stations = ["KMOW", "HWOD", "KDWB"]
    number_of_docs_per_pusher = 5000

    admin = Admin(cluster.sync_gateways[0])

    dj_0 = admin.register_user(target=cluster.sync_gateways[0], db="db", name="dj_0", password="******")

    kdwb_caches = []

    for radio_station in radio_stations:
        doc_pusher = admin.register_user(
            target=cluster.sync_gateways[0],
            db="db",
            name="{}_doc_pusher".format(radio_station),
            password="******",
            channels=[radio_station])
        doc_pusher.add_docs(number_of_docs_per_pusher, bulk=True)
        if doc_pusher.name == "KDWB_doc_pusher":
            kdwb_caches.append(doc_pusher.cache)

    access_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="access_doc_pusher", password="******")

    # Grant dj_0 access to KDWB channel via sync after docs are pushed
    access_doc_pusher.add_doc("access_doc", content="access")

    # Build global doc_id, rev dict for all docs from all KDWB caches
    kdwb_docs = {k: v for cache in kdwb_caches for k, v in cache.items()}

    # wait for changes
    time.sleep(5)

    verify_changes(dj_0, expected_num_docs=number_of_docs_per_pusher, expected_num_revisions=0, expected_docs=kdwb_docs)
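
# The fixed time.sleep(5) above can race on a slow cluster. A hedged
# alternative is to poll until the expected number of docs is visible;
# 'get_changes' below is a hypothetical accessor standing in for whatever
# the testkit User client actually exposes.
import time

def wait_for_doc_count(user, expected, timeout=60, interval=2):
    # Poll the user's changes feed until it reports at least 'expected' docs
    deadline = time.time() + timeout
    while time.time() < deadline:
        if len(user.get_changes()) >= expected:  # hypothetical accessor
            return
        time.sleep(interval)
    raise AssertionError("Timed out waiting for {} docs".format(expected))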
def test_log_rotation_invalid_path(params_from_base_test_setup, sg_conf_name):
    """Test to check that SG is not started with an invalid logFilePath.
    OS-specific case. SG should verify that the path is valid on startup.
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup["xattrs_enabled"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf)
    sg_one_url = cluster_hosts["sync_gateways"][0]["public"]

    # Read sample sg_conf
    data = load_sync_gateway_config(sg_conf, mode, cluster_hosts["couchbase_servers"][0], xattrs_enabled, cluster_conf)

    # Set a non-existing logFilePath
    data['logging']["default"]["logFilePath"] = "/12345/1231/131231.log"

    # Create a temp config file one directory above sg_conf
    temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json'

    with open(temp_conf, 'w') as fp:
        json.dump(data, fp)

    # Stop sync_gateways
    log_info(">>> Stopping sync_gateway")
    sg_helper = SyncGateway()
    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)

    try:
        sg_helper.start_sync_gateways(cluster_config=cluster_conf, url=sg_one_url, config=temp_conf)
    except ProvisioningError:
        # Expected failure: restore the known-good config
        sg_helper.start_sync_gateways(cluster_config=cluster_conf, url=sg_one_url, config=sg_conf)
        # Remove generated conf file
        os.remove(temp_conf)
        return

    # Remove generated conf file
    os.remove(temp_conf)
    pytest.fail("SG shouldn't be started!!!!")
def test_log_logLevel_invalid(params_from_base_test_setup, sg_conf_name):
    """Run SG with a non-existing logLevel value
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup["xattrs_enabled"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf)
    sg_one_url = cluster_hosts["sync_gateways"][0]["public"]

    # Read sample sg_conf
    data = load_sync_gateway_config(sg_conf, mode, cluster_hosts["couchbase_servers"][0], xattrs_enabled, cluster_conf)

    # 'debugFake' is an invalid value for logLevel
    data['logging']["default"]["logLevel"] = "debugFake"

    # Create a temp config file one directory above sg_conf
    temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json'

    with open(temp_conf, 'w') as fp:
        json.dump(data, fp)

    # Stop sync_gateways
    log_info(">>> Stopping sync_gateway")
    sg_helper = SyncGateway()
    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)

    try:
        sg_helper.start_sync_gateways(cluster_config=cluster_conf, url=sg_one_url, config=temp_conf)
    except ProvisioningError:
        # Expected failure: restore the known-good config
        sg_helper.start_sync_gateways(cluster_config=cluster_conf, url=sg_one_url, config=sg_conf)
        # Remove generated conf file
        os.remove(temp_conf)
        return

    # Remove generated conf file
    os.remove(temp_conf)
    pytest.fail("SG shouldn't be started!!!!")
def test_log_logKeys_string(params_from_base_test_setup, sg_conf_name):
    """Negative test to verify that SG does not start when logKeys is a
    string instead of a list
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup["xattrs_enabled"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # Read sample sg_conf
    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf)
    sg_one_url = cluster_hosts["sync_gateways"][0]["public"]

    data = load_sync_gateway_config(sg_conf, mode, cluster_hosts["couchbase_servers"][0], xattrs_enabled, cluster_conf)

    # Set logKeys as a string in the config file
    data['logging']["default"]["logKeys"] = "http"

    # Create a temp config file one directory above sg_conf
    temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json'

    with open(temp_conf, 'w') as fp:
        json.dump(data, fp)

    # Stop sync_gateways
    log_info(">>> Stopping sync_gateway")
    sg_helper = SyncGateway()
    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)

    try:
        sg_helper.start_sync_gateways(cluster_config=cluster_conf, url=sg_one_url, config=temp_conf)
    except ProvisioningError:
        # Expected failure: restore the known-good config
        sg_helper.start_sync_gateways(cluster_config=cluster_conf, url=sg_one_url, config=sg_conf)
        # Remove generated conf file
        os.remove(temp_conf)
        return

    # Remove generated conf file
    os.remove(temp_conf)
    pytest.fail("SG shouldn't be started!!!!")
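
# The four negative logging tests above (rotation values, logFilePath,
# logLevel, logKeys) repeat the same write-temp-conf / expect-failure /
# cleanup dance. A possible refactor is sketched below; 'mutate' is a
# hypothetical callback, ProvisioningError is assumed imported as in the
# tests above, and this helper is not part of the testkit.
import json
import os

import pytest

def assert_sg_fails_to_start(sg_helper, cluster_conf, sg_one_url, sg_conf, data, mutate):
    # Apply the config mutation and write the result one directory above sg_conf
    mutate(data)
    temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json'
    with open(temp_conf, 'w') as fp:
        json.dump(data, fp)

    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)
    try:
        sg_helper.start_sync_gateways(cluster_config=cluster_conf, url=sg_one_url, config=temp_conf)
    except ProvisioningError:
        # Expected failure: restore the known-good config
        sg_helper.start_sync_gateways(cluster_config=cluster_conf, url=sg_one_url, config=sg_conf)
    else:
        pytest.fail("SG shouldn't be started!!!!")
    finally:
        os.remove(temp_conf)

# Each test body would then shrink to a one-line mutation, e.g.
# mutate=lambda d: d['logging']['default'].update({'logLevel': 'debugFake'})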
def test_enable_cbs_ssl_in_cluster_config():
    c = Cluster(MOCK_CLUSTER_CONFIG)
    assert not c.cbs_ssl

    persist_cluster_config_environment_prop(MOCK_CLUSTER_CONFIG, 'cbs_ssl_enabled', True)

    # The property must be persisted to the JSON twin of the cluster config ...
    with open(MOCK_CLUSTER_CONFIG + ".json") as f:
        cluster_json = json.loads(f.read())

    assert "environment" in cluster_json
    assert cluster_json["environment"]["cbs_ssl_enabled"]

    c = Cluster(MOCK_CLUSTER_CONFIG)
    assert c.cbs_ssl

    # ... and to the ini-style cluster config itself
    config = ConfigParser.ConfigParser()
    config.read(MOCK_CLUSTER_CONFIG)
    assert config.has_section("environment")
    assert config.getboolean('environment', 'cbs_ssl_enabled')
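
# The test above implies that the cluster config is kept in two forms: an
# ini-style file and a JSON twin at '<config>.json', both carrying an
# 'environment' section. A minimal sketch of what a persist function must
# do to satisfy those assertions; the layout is inferred from the test,
# not taken from the real persist_cluster_config_environment_prop.
import json
import ConfigParser  # Python 2, matching the test above

def persist_environment_prop_sketch(cluster_config, prop, value):
    # Update the ini-style cluster config
    config = ConfigParser.ConfigParser()
    config.read(cluster_config)
    if not config.has_section("environment"):
        config.add_section("environment")
    config.set("environment", prop, str(value))
    with open(cluster_config, "w") as f:
        config.write(f)

    # Mirror the change into the JSON twin
    with open(cluster_config + ".json") as f:
        cluster_json = json.load(f)
    cluster_json.setdefault("environment", {})[prop] = value
    with open(cluster_config + ".json", "w") as f:
        json.dump(cluster_json, f, indent=4)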
def reset_cluster(self, cluster_config, sync_gateway_config):
    """
    1. Stop sync_gateways
    2. Stop sg_accels
    3. Delete sync_gateway artifacts (logs, conf)
    4. Delete sg_accel artifacts (logs, conf)
    5. Delete all server buckets
    6. Create buckets from 'sync_gateway_config'
    7. Wait for server to be in 'healthy' state
    8. Deploy sync_gateway config and start
    9. Deploy sg_accel config and start (distributed index mode only)
    """
    cluster = Cluster(config=cluster_config)
    cluster.reset(sync_gateway_config)
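
# Example usage, assuming this method lives on the ClusterKeywords class
# used throughout these tests (the paths below are illustrative only):
#
#   cluster_keywords = ClusterKeywords()
#   cluster_keywords.reset_cluster(
#       cluster_config="resources/cluster_configs/base_cc",
#       sync_gateway_config="resources/sync_gateway_configs/sync_gateway_default_cc.json")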
def test_multiple_db_single_data_bucket_single_index_bucket(params_from_base_test_setup, sg_conf_name, num_users, num_docs_per_user):
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'multiple_db_single_data_bucket_single_index_bucket'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_users: {}".format(num_users))
    log_info("Using num_docs_per_user: {}".format(num_docs_per_user))

    # 2 dbs share the same data and index bucket
    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    num_db_users = num_users
    num_db2_users = num_users

    admin = Admin(cluster.sync_gateways[0])

    db_one_users = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db", name_prefix="bulk_db_user", number=num_db_users, password="******", channels=["ABC"])
    db_two_users = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db2", name_prefix="bulk_db2_user", number=num_db2_users, password="******", channels=["ABC"])

    all_users = list(db_one_users)
    all_users.extend(db_two_users)
    assert len(all_users) == num_db_users + num_db2_users

    # Round robin
    num_sgs = len(cluster.sync_gateways)
    count = 1

    for user in all_users:
        user.add_docs(num_docs_per_user, bulk=True)
        user.target = cluster.sync_gateways[(count + 1) % num_sgs]
        count += 1

    time.sleep(10)

    # Get list of all docs from users caches
    cached_docs_from_all_users = {k: v for user in all_users for k, v in user.cache.items()}

    # Verify each user has all of the docs
    verify_changes(all_users, expected_num_docs=(num_users * 2) * num_docs_per_user, expected_num_revisions=0, expected_docs=cached_docs_from_all_users)
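
# The counter-and-modulo round robin above can be written more directly
# with itertools.cycle; a purely illustrative sketch (note it starts at
# the first sync_gateway rather than at index (1 + 1) % num_sgs).
import itertools

def add_docs_round_robin(users, sync_gateways, docs_per_user):
    # Rotate each user's target across the available sync_gateways
    sg_cycle = itertools.cycle(sync_gateways)
    for user in users:
        user.add_docs(docs_per_user, bulk=True)
        user.target = next(sg_cycle)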
def test_log_nondefault_logKeys_set(params_from_base_test_setup, sg_conf_name):
    """Test to verify non-default logKeys containing an invalid area.
    SG should still work with a non-existing logging area (positive case).
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup["xattrs_enabled"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # Read sample sg_conf
    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf)
    sg_one_url = cluster_hosts["sync_gateways"][0]["public"]

    data = load_sync_gateway_config(sg_conf, mode, cluster_hosts["couchbase_servers"][0], xattrs_enabled, cluster_conf)

    # "FAKE" is not a valid logging area
    data['logging']["default"]["logKeys"] = ["HTTP", "FAKE"]

    # Create a temp config file one directory above sg_conf
    temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json'

    with open(temp_conf, 'w') as fp:
        json.dump(data, fp)

    # Stop sync_gateways
    log_info(">>> Stopping sync_gateway")
    sg_helper = SyncGateway()
    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)

    # Start sync_gateways; the unknown "FAKE" area must not prevent startup
    sg_helper.start_sync_gateways(cluster_config=cluster_conf, url=sg_one_url, config=temp_conf)

    # Remove generated conf file
    os.remove(temp_conf)
def test_bulk_get_compression(params_from_base_test_setup, sg_conf_name, num_docs, accept_encoding, x_accept_part_encoding, user_agent):
    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'test_bulk_get_compression'")
    log_info("Using cluster_config: {}".format(cluster_config))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))
    log_info("Using user_agent: {}".format(user_agent))
    log_info("Using accept_encoding: {}".format(accept_encoding))
    log_info("Using x_accept_part_encoding: {}".format(x_accept_part_encoding))

    cluster = Cluster(config=cluster_config)
    cluster.reset(sg_config_path=sg_conf)
    admin = Admin(cluster.sync_gateways[0])

    user = admin.register_user(cluster.sync_gateways[0], "db", "seth", "password", channels=["seth"])

    doc_body = Data.load("mock_users_20k.json")

    with concurrent.futures.ThreadPoolExecutor(max_workers=libraries.testkit.settings.MAX_REQUEST_WORKERS) as executor:
        futures = [executor.submit(user.add_doc, doc_id="test-{}".format(i), content=doc_body) for i in range(num_docs)]
        for future in concurrent.futures.as_completed(futures):
            try:
                log_info(future.result())
            except Exception as e:
                log_info("Failed to push doc: {}".format(e))

    docs = [{"id": "test-{}".format(i)} for i in range(num_docs)]
    payload = {"docs": docs}

    # Issue curl request and get size of request
    response_size = issue_request(cluster.sync_gateways[0], user_agent, accept_encoding, x_accept_part_encoding, payload)
    log_info("Response size: {}".format(response_size))

    # Verify size matches expected size
    verify_response_size(user_agent, accept_encoding, x_accept_part_encoding, response_size)
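
# The comment above says issue_request shells out to curl to measure the
# on-the-wire response size. A requests-based sketch of the same
# measurement; the URL shape and auth handling are assumptions, not the
# repo's actual helper.
import json
import requests

def bulk_get_response_size(sg_url, db, auth, payload, user_agent=None,
                           accept_encoding=None, x_accept_part_encoding=None):
    # Vary the same headers the compression matrix exercises
    headers = {"Content-Type": "application/json"}
    if user_agent is not None:
        headers["User-Agent"] = user_agent
    if accept_encoding is not None:
        headers["Accept-Encoding"] = accept_encoding
    if x_accept_part_encoding is not None:
        headers["X-Accept-Part-Encoding"] = x_accept_part_encoding

    resp = requests.post(
        "{}/{}/_bulk_get".format(sg_url, db),
        data=json.dumps(payload),
        headers=headers,
        auth=auth,
        stream=True)  # stream so requests does not transparently decompress
    resp.raise_for_status()

    # raw.read() returns the body as sent on the wire (gzip'd or not),
    # which is the size the verification cares about
    return len(resp.raw.read())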