def test_log_200mb(params_from_base_test_setup, sg_conf_name):
    """Verify log rotation honors a custom ``maxsize`` of 200 MB (default is 100 MB).

    Flow: stop SG, pre-seed a large log file, restart SG with
    ``logging.default.rotation.maxsize = 200``, generate traffic, then assert
    that exactly one rotated backup exists alongside the active log (2 files).
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup["xattrs_enabled"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # Shell commands below run on the first sync_gateway host over SSH.
    remote_executor = RemoteExecutor(cluster.sync_gateways[0].ip)

    # Stop sync_gateways so the log file can be seeded while SG is down
    log_info(">>> Stopping sync_gateway")
    sg_helper = SyncGateway()
    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf)
    sg_one_url = cluster_hosts["sync_gateways"][0]["public"]

    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)

    # Start from a clean log directory, then pre-seed the log near the
    # rotation threshold so the test traffic pushes it over.
    remote_executor.execute("mkdir -p /tmp/sg_logs")
    remote_executor.execute("sudo rm -rf /tmp/sg_logs/sg_log_rotation*")
    # NOTE(review): bs=204850000 with count=100 writes ~19 GB, not ~200 MB;
    # count=1 would match the 200 MB maxsize under test — confirm intended size.
    remote_executor.execute(
        "sudo dd if=/dev/zero of=/tmp/sg_logs/sg_log_rotation.log bs=204850000 count=100"
    )
    remote_executor.execute("sudo chmod 777 -R /tmp/sg_logs")

    # Read the sample sg_conf so it can be customized for this test
    data = load_sync_gateway_config(sg_conf, mode, cluster_hosts["couchbase_servers"][0], xattrs_enabled, cluster_conf)

    # Override rotation maxsize to 200 (MB)
    data['logging']["default"]["rotation"]["maxsize"] = 200

    # Write a temp config file two directories above sg_conf
    temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json'

    with open(temp_conf, 'w') as fp:
        json.dump(data, fp)

    sg_helper.start_sync_gateways(cluster_config=cluster_conf, url=sg_one_url, config=temp_conf)
    # ~1 MB of log output will be generated by the 1000 root-endpoint requests
    remote_executor.execute(
        "for ((i=1;i <= 1000;i += 1)); do curl -s http://localhost:4984/ > /dev/null; done"
    )

    status, stdout, stderr = remote_executor.execute(
        "ls /tmp/sg_logs/ | grep sg_log_rotation | wc -l")
    # Rotation should have produced exactly one backup: active log + backup = 2
    assert stdout[0].rstrip() == '2'

    # Remove generated conf file so it does not leak into other tests
    os.remove(temp_conf)
def test_log_maxage_10_timestamp_ignored(params_from_base_test_setup, sg_conf_name):
    """Verify SG continues to write logs to the same file even when the
    log file's timestamp has been changed.

    Flow: seed a ~1 MB log, run SG with ``rotation.maxage = 10`` (days),
    generate traffic, stop SG, back-date the log files by 10 days, restart
    SG, and assert no additional log file was created (count stays at 2).
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup["xattrs_enabled"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # Shell commands below run on the first sync_gateway host over SSH.
    remote_executor = RemoteExecutor(cluster.sync_gateways[0].ip)

    # Stop sync_gateways so the log file can be seeded while SG is down
    log_info(">>> Stopping sync_gateway")
    sg_helper = SyncGateway()
    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf)
    sg_one_url = cluster_hosts["sync_gateways"][0]["public"]

    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)

    remote_executor.execute("mkdir -p /tmp/sg_logs")
    remote_executor.execute("sudo rm -rf /tmp/sg_logs/sg_log_rotation*")
    # Generate a log file of almost 1 MB (single dd write)
    remote_executor.execute(
        "sudo dd if=/dev/zero of=/tmp/sg_logs/sg_log_rotation.log bs=1030000 count=1"
    )
    remote_executor.execute("sudo chmod 777 -R /tmp/sg_logs")

    # Read the sample sg_conf so it can be customized for this test
    data = load_sync_gateway_config(sg_conf, mode, cluster_hosts["couchbase_servers"][0], xattrs_enabled, cluster_conf)

    # Set rotation maxage = 10 days
    data['logging']["default"]["rotation"]["maxage"] = 10

    # Write a temp config file two directories above sg_conf
    temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json'

    with open(temp_conf, 'w') as fp:
        json.dump(data, fp)

    sg_helper.start_sync_gateways(cluster_config=cluster_conf, url=sg_one_url, config=temp_conf)
    # ~1 MB of log output will be generated by the 1000 root-endpoint requests
    remote_executor.execute(
        "for ((i=1;i <= 1000;i += 1)); do curl -s http://localhost:4984/ > /dev/null; done"
    )

    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)
    # Change the timestamp of the logs while SG is stopped (file names unchanged)
    remote_executor.execute(
        "sudo touch -d \"10 days ago\" /tmp/sg_logs/sg_log_rotation*")

    sg_helper.start_sync_gateways(cluster_config=cluster_conf, url=sg_one_url, config=temp_conf)

    _, stdout, _ = remote_executor.execute(
        "ls /tmp/sg_logs/ | grep sg_log_rotation | wc -l")
    # Verify that no new log file was created: count should still be 2
    assert stdout[0].rstrip() == '2'

    # Remove generated conf file so it does not leak into other tests
    os.remove(temp_conf)