def test_rack():
    """Verify that nodes come up with the expected zone-derived rack assignment."""
    service_name = config.get_foldered_service_name()
    sdk_install.install(
        config.PACKAGE_NAME,
        service_name,
        3,
        additional_options={
            "service": {"name": service_name},
            "nodes": {"placement_constraint": '[["@zone", "GROUP_BY", "1"]]'},
        },
    )
    status_result = nodetool.cmd(service_name, "node-0-server", "status")
    log.info("raw_status: {}".format(status_result))
    status_stdout = status_result[1]
    log.info("stdout: {}".format(status_stdout))
    first_node = nodetool.parse_status(status_stdout)[0]
    log.info("node: {}".format(first_node))
    # The cluster should report the zone name as the rack, not the Cassandra default "rack1".
    assert first_node.get_rack() != "rack1"
    assert sdk_utils.get_cluster_zones()[first_node.get_address()] == first_node.get_rack()
def configure_package(configure_security):
    """Fixture: (re)install the foldered service and its test jobs; tear everything down afterwards."""
    test_jobs = []
    try:
        test_jobs = config.get_all_jobs(node_address=config.get_foldered_node_address())
        # destroy/reinstall any prior leftover jobs, so that they don't touch the newly installed service:
        for job in test_jobs:
            sdk_jobs.install_job(job)

        service_name = config.get_foldered_service_name()
        sdk_install.uninstall(config.PACKAGE_NAME, service_name)

        # user=root because Azure CLI needs to run in root...
        # We don't run the Azure tests in strict however, so don't set it then.
        service_section = {"name": service_name}
        if os.environ.get("SECURITY") != "strict":
            service_section["user"] = "******"
        sdk_install.install(
            config.PACKAGE_NAME,
            service_name,
            config.DEFAULT_TASK_COUNT,
            additional_options={"service": service_section},
        )

        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.get_foldered_service_name())

        # remove job definitions from metronome
        for job in test_jobs:
            sdk_jobs.remove_job(job)
def test_backup_and_restore_to_s3_with_jmx_with_auth():
    """Back up to S3 and restore against a JMX+auth-enabled service, then uninstall and clean up jobs.

    Requires AWS credentials in the environment; fails fast with a hint otherwise.
    """
    key_id = os.getenv("AWS_ACCESS_KEY_ID")
    if not key_id:
        assert (
            False
        ), 'AWS credentials are required for this test. Disable test with e.g. TEST_TYPES="sanity and not aws"'
    plan_parameters = {
        "AWS_ACCESS_KEY_ID": key_id,
        "AWS_SECRET_ACCESS_KEY": os.getenv("AWS_SECRET_ACCESS_KEY"),
        "AWS_REGION": os.getenv("AWS_REGION", "us-west-2"),
        "S3_BUCKET_NAME": os.getenv("AWS_BUCKET_NAME", "infinity-framework-test"),
        "SNAPSHOT_NAME": str(uuid.uuid1()),
        "CASSANDRA_KEYSPACES": '"testspace1 testspace2"',
    }
    config.run_backup_and_restore_with_auth(
        config.get_foldered_service_name(),
        "backup-s3",
        "restore-s3",
        plan_parameters,
        config.get_foldered_node_address(),
    )
    sdk_install.uninstall(config.PACKAGE_NAME, config.get_foldered_service_name())
    # Fix: fetch the job list in one annotated assignment instead of binding a
    # throwaway empty list that was immediately overwritten (dead store).
    test_jobs: List[Dict[str, Any]] = config.get_all_jobs(
        node_address=config.get_foldered_node_address(), auth=True
    )
    for job in test_jobs:
        sdk_jobs.remove_job(job)
def configure_package(configure_security: None) -> Iterator[None]:
    """Fixture: start from a clean slate and uninstall again when the session ends."""
    service_name = config.get_foldered_service_name()
    try:
        sdk_install.uninstall(config.PACKAGE_NAME, service_name)
        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, service_name)
def test_rack():
    """Install with a zone GROUP_BY placement constraint and check the reported rack."""
    service_name = config.get_foldered_service_name()
    sdk_install.install(
        config.PACKAGE_NAME,
        service_name,
        3,
        additional_options={
            "service": {"name": service_name},
            "nodes": {"placement_constraint": '[["@zone", "GROUP_BY", "1"]]'},
        })
    # dcos task exec node-0-server bash -c 'JAVA_HOME=jre1.8.0_144 apache-cassandra-3.0.14/bin/nodetool status'
    # NOTE(review): sibling rack tests call nodetool.cmd(service_name, task, command);
    # confirm this two-argument form is the intended API here.
    raw_status = nodetool.cmd('node-0', 'status')
    log.info("raw_status: {}".format(raw_status))
    status_stdout = raw_status[1]
    log.info("stdout: {}".format(status_stdout))
    parsed_node = nodetool.parse_status(status_stdout)[0]
    log.info("node: {}".format(parsed_node))
    assert parsed_node.get_rack() != 'rack1'
    assert 'us-west' in parsed_node.get_rack()
def test_repair_cleanup_plans_complete():
    """Run the 'cleanup' and then the 'repair' plan against testspace1 and wait for each to finish."""
    parameters = {'CASSANDRA_KEYSPACE': 'testspace1'}
    service_name = config.get_foldered_service_name()
    node_address = config.get_foldered_node_address()

    # populate 'testspace1' for test, then delete afterwards:
    with sdk_jobs.RunJobContext(
            before_jobs=[
                config.get_write_data_job(node_address=node_address),
                config.get_verify_data_job(node_address=node_address),
            ],
            after_jobs=[
                config.get_delete_data_job(node_address=node_address),
                config.get_verify_deletion_job(node_address=node_address),
            ]):
        # cleanup first, then repair — same order as the original test.
        for plan in ('cleanup', 'repair'):
            sdk_plan.start_plan(service_name, plan, parameters=parameters)
            sdk_plan.wait_for_completed_plan(service_name, plan)
def test_rack():
    """Install with a zone GROUP_BY placement constraint and verify the node's rack."""
    service_name = config.get_foldered_service_name()
    sdk_install.install(
        config.PACKAGE_NAME,
        service_name,
        3,
        additional_options={
            "service": {"name": service_name},
            "nodes": {"placement_constraint": '[["@zone", "GROUP_BY", "1"]]'},
        })
    raw_status = nodetool.cmd(service_name, 'node-0-server', 'status')
    log.info("raw_status: {}".format(raw_status))
    status_stdout = raw_status[1]
    log.info("stdout: {}".format(status_stdout))
    parsed_node = nodetool.parse_status(status_stdout)[0]
    log.info("node: {}".format(parsed_node))
    # The rack should be derived from the zone, not left at the Cassandra default.
    assert parsed_node.get_rack() != 'rack1'
    assert 'us-west' in parsed_node.get_rack()
def configure_package(configure_security):
    """Fixture: upgrade from the beta package, install test jobs, and tear everything down afterwards."""
    test_jobs = []
    try:
        test_jobs = config.get_all_jobs(node_address=config.get_foldered_node_address())

        service_name = config.get_foldered_service_name()
        sdk_install.uninstall(service_name, package_name=config.PACKAGE_NAME)
        sdk_upgrade.test_upgrade(
            "beta-{}".format(config.PACKAGE_NAME),
            config.PACKAGE_NAME,
            config.DEFAULT_TASK_COUNT,
            service_name=service_name,
            additional_options={"service": {"name": service_name}})

        job_dir = tempfile.mkdtemp(prefix='cassandra-test')
        for job in test_jobs:
            sdk_jobs.install_job(job, tmp_dir=job_dir)

        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.get_foldered_service_name(), package_name=config.PACKAGE_NAME)
        for job in test_jobs:
            sdk_jobs.remove_job(job)
def configure_package(configure_security):
    """Fixture: remove leftover jobs, upgrade the service, reinstall jobs; clean up afterwards."""
    test_jobs = []
    try:
        test_jobs = config.get_all_jobs(node_address=config.get_foldered_node_address())
        # destroy any leftover jobs first, so that they don't touch the newly installed service:
        for job in test_jobs:
            sdk_jobs.remove_job(job)

        service_name = config.get_foldered_service_name()
        sdk_install.uninstall(config.PACKAGE_NAME, service_name)
        sdk_upgrade.test_upgrade(
            config.PACKAGE_NAME,
            service_name,
            config.DEFAULT_TASK_COUNT,
            additional_options={"service": {"name": service_name}})

        job_dir = tempfile.mkdtemp(prefix='cassandra-test')
        for job in test_jobs:
            sdk_jobs.install_job(job, tmp_dir=job_dir)

        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.get_foldered_service_name())
        for job in test_jobs:
            sdk_jobs.remove_job(job)
def configure_package(configure_security):
    """Fixture: uninstall before and after the test session so each run starts clean."""
    service_name = config.get_foldered_service_name()
    try:
        sdk_install.uninstall(config.PACKAGE_NAME, service_name)
        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, service_name)
def configure_package(configure_security):
    """Fixture: install the service as root (for Azure CLI) plus test jobs; tear down afterwards."""
    test_jobs = []
    try:
        test_jobs = config.get_all_jobs(node_address=config.get_foldered_node_address())

        service_name = config.get_foldered_service_name()
        sdk_install.uninstall(service_name, package_name=config.PACKAGE_NAME)
        # user=root because Azure CLI needs to run in root...
        sdk_install.install(
            config.PACKAGE_NAME,
            config.DEFAULT_TASK_COUNT,
            service_name=service_name,
            additional_options={"service": {"name": service_name, "user": "******"}})

        job_dir = tempfile.mkdtemp(prefix='cassandra-test')
        for job in test_jobs:
            sdk_jobs.install_job(job, tmp_dir=job_dir)

        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.get_foldered_service_name(), package_name=config.PACKAGE_NAME)
        # remove job definitions from metronome
        for job in test_jobs:
            sdk_jobs.remove_job(job)
def configure_package(configure_security):
    """Fixture: reinstall leftover jobs, run the upgrade test, then tear everything down."""
    test_jobs = []
    try:
        test_jobs = config.get_all_jobs(node_address=config.get_foldered_node_address())
        # destroy/reinstall any prior leftover jobs, so that they don't touch the newly installed service:
        for job in test_jobs:
            sdk_jobs.install_job(job)

        service_name = config.get_foldered_service_name()
        sdk_install.uninstall(config.PACKAGE_NAME, service_name)
        sdk_upgrade.test_upgrade(
            config.PACKAGE_NAME,
            service_name,
            config.DEFAULT_TASK_COUNT,
            additional_options={"service": {"name": service_name}})

        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.get_foldered_service_name())
        for job in test_jobs:
            sdk_jobs.remove_job(job)
def configure_package(configure_security: None) -> Iterator[None]:
    """Fixture: reinstall leftover jobs, (re)install the service, yield, then tear down."""
    test_jobs: List[Dict[str, Any]] = []
    try:
        test_jobs = config.get_all_jobs(node_address=config.get_foldered_node_address())
        # destroy/reinstall any prior leftover jobs, so that they don't touch the newly installed service:
        for job in test_jobs:
            sdk_jobs.install_job(job)

        service_name = config.get_foldered_service_name()
        sdk_install.uninstall(config.PACKAGE_NAME, service_name)

        # user=root because Azure CLI needs to run in root...
        # We don't run the Azure tests in strict however, so don't set it then.
        service_section: Dict[str, Any] = {"name": service_name}
        if os.environ.get("SECURITY") != "strict":
            service_section["user"] = "******"
        sdk_install.install(
            config.PACKAGE_NAME,
            service_name,
            config.DEFAULT_TASK_COUNT,
            additional_options={"service": service_section},
        )

        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.get_foldered_service_name())
        # remove job definitions from metronome
        for job in test_jobs:
            sdk_jobs.remove_job(job)
def test_endpoints():
    """The scheduler's native-client endpoint should resolve via autoip DNS and expose no VIP."""
    service_name = config.get_foldered_service_name()
    # check that we can reach the scheduler via admin router, and that returned endpoints are sanitized:
    endpoints = sdk_networks.get_endpoint(config.PACKAGE_NAME, service_name, "native-client")
    expected_host = sdk_hosts.autoip_host(service_name, "node-0-server", 9042)
    assert endpoints["dns"][0] == expected_host
    assert "vip" not in endpoints
def test_endpoints():
    """Query the native-client endpoint via the CLI; verify DNS entry and sanitized (VIP-free) output."""
    # check that we can reach the scheduler via admin router, and that returned endpoints are sanitized:
    endpoints = cmd.svc_cli(
        config.PACKAGE_NAME, config.get_foldered_service_name(),
        'endpoints native-client', json=True)
    assert endpoints['dns'][0] == sdk_hosts.autoip_host(
        config.get_foldered_service_name(), 'node-0-server', 9042)
    # Fix: use the membership idiom 'x not in y' instead of 'not x in y' (PEP 8, flake8 E713).
    assert 'vip' not in endpoints
def test_endpoints():
    """The 'node' endpoint should expose both an autoip DNS entry and a VIP."""
    service_name = config.get_foldered_service_name()
    # check that we can reach the scheduler via admin router, and that returned endpoints are sanitized:
    raw = cmd.run_cli('cassandra --name={} endpoints node'.format(service_name))
    endpoints = json.loads(raw)
    assert endpoints['dns'][0] == sdk_hosts.autoip_host(service_name, 'node-0-server', 9042)
    assert endpoints['vip'] == sdk_hosts.vip_host(service_name, 'node', 9042)
def test_udf() -> None:
    """Enable user-defined functions via a config update and verify a client can use them."""
    test_jobs: List[Dict[str, Any]] = []
    try:
        test_jobs = config.get_udf_jobs(node_address=config.get_foldered_node_address())
        # destroy/reinstall any prior leftover jobs, so that they don't touch the newly installed service:
        for job in test_jobs:
            sdk_jobs.install_job(job)

        udf_settings = {
            "cassandra": {
                "enable_user_defined_functions": True,
                "enable_scripted_user_defined_functions": True,
            }
        }
        sdk_service.update_configuration(
            config.PACKAGE_NAME,
            config.get_foldered_service_name(),
            udf_settings,
            config.DEFAULT_TASK_COUNT,
        )
        config.verify_client_can_write_read_udf(config.get_foldered_node_address())
    finally:
        # remove job definitions from metronome
        for job in test_jobs:
            sdk_jobs.remove_job(job)
def test_custom_rack_upgrade():
    """Upgrade test with a custom (non-default) rack configured on the service."""
    service_name = config.get_foldered_service_name()
    options = {"service": {"name": service_name, "rack": "not-rack1"}}
    sdk_upgrade.test_upgrade(
        config.PACKAGE_NAME,
        service_name,
        config.DEFAULT_TASK_COUNT,
        additional_options=options,
    )
def test_custom_rack_upgrade() -> None:
    """Upgrade test starting FROM a configuration with a custom (non-default) rack."""
    service_name = config.get_foldered_service_name()
    options = {"service": {"name": service_name, "rack": "not-rack1"}}
    sdk_upgrade.test_upgrade(
        config.PACKAGE_NAME,
        service_name,
        config.DEFAULT_TASK_COUNT,
        from_options=options,
    )
def configure_package(configure_security):
    """Fixture: clean out old jobs, install the service (root outside strict) and jobs; tear down after."""
    test_jobs = []
    try:
        test_jobs = config.get_all_jobs(node_address=config.get_foldered_node_address())
        # destroy any leftover jobs first, so that they don't touch the newly installed service:
        for job in test_jobs:
            sdk_jobs.remove_job(job)

        service_name = config.get_foldered_service_name()
        sdk_install.uninstall(config.PACKAGE_NAME, service_name)

        # user=root because Azure CLI needs to run in root...
        # We don't run the Azure tests in strict however, so don't set it then.
        service_section = {"name": service_name}
        if os.environ.get("SECURITY") != "strict":
            service_section["user"] = "******"
        sdk_install.install(
            config.PACKAGE_NAME,
            service_name,
            config.DEFAULT_TASK_COUNT,
            additional_options={"service": service_section})

        job_dir = tempfile.mkdtemp(prefix='cassandra-test')
        for job in test_jobs:
            sdk_jobs.install_job(job, tmp_dir=job_dir)

        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.get_foldered_service_name())
        # remove job definitions from metronome
        for job in test_jobs:
            sdk_jobs.remove_job(job)
def test_custom_jmx_port():
    """Reconfigure the JMX port and verify every node task is listening on it."""
    # Single source of truth for the port: the expected lsof output, the config
    # payload and the lsof command are all derived from jmx_port instead of
    # repeating the literal 7200 three times.
    jmx_port = 7200
    expected_open_port = ":{} (LISTEN)".format(jmx_port)

    new_config = {"cassandra": {"jmx_port": jmx_port}}
    sdk_service.update_configuration(
        config.PACKAGE_NAME,
        config.get_foldered_service_name(),
        new_config,
        config.DEFAULT_TASK_COUNT,
    )
    sdk_plan.wait_for_completed_deployment(config.get_foldered_service_name())

    tasks = sdk_tasks.get_service_tasks(config.get_foldered_service_name(), "node")
    for task in tasks:
        _, stdout, _ = sdk_cmd.run_cli("task exec {} lsof -i :{}".format(task.id, jmx_port))
        assert expected_open_port in stdout
def test_rack() -> None:
    """Install with a zone GROUP_BY constraint and confirm the rack matches the node's zone."""
    service_name = config.get_foldered_service_name()
    sdk_install.install(
        config.PACKAGE_NAME,
        service_name,
        3,
        additional_options={
            "service": {"name": service_name},
            "nodes": {"placement_constraint": '[["@zone", "GROUP_BY", "1"]]'},
        },
    )
    status_result = nodetool.cmd(service_name, "node-0-server", "status")
    log.info("raw_status: {}".format(status_result))
    status_stdout = status_result[1]
    log.info("stdout: {}".format(status_stdout))
    first_node = nodetool.parse_status(status_stdout)[0]
    log.info("node: {}".format(first_node))
    assert first_node.get_rack() != "rack1"
    # The rack reported by Cassandra should be the zone the node landed in.
    assert sdk_utils.get_cluster_zones()[first_node.get_address()] == first_node.get_rack()
def test_rack():
    """Check the zone-based rack assignment after installing with a GROUP_BY constraint."""
    service_name = config.get_foldered_service_name()
    sdk_install.install(
        config.PACKAGE_NAME,
        service_name,
        3,
        additional_options={
            "service": {"name": service_name},
            "nodes": {"placement_constraint": '[["@zone", "GROUP_BY", "1"]]'},
        })
    nodetool_result = nodetool.cmd(service_name, 'node-0-server', 'status')
    log.info("raw_status: {}".format(nodetool_result))
    out = nodetool_result[1]
    log.info("stdout: {}".format(out))
    status_node = nodetool.parse_status(out)[0]
    log.info("node: {}".format(status_node))
    assert status_node.get_rack() != 'rack1'
    assert 'us-west' in status_node.get_rack()
def test_mesos_v0_api():
    """Install Cassandra against the Mesos V0 API, then restore the default install for later tests."""
    # Fix: resolve the name BEFORE entering the try block. The finally clause
    # references foldered_name; with the assignment inside try, a failure before
    # the binding would raise NameError in finally and mask the real error.
    foldered_name = config.get_foldered_service_name()
    try:
        # Install Cassandra using the v0 api.
        # Then, clean up afterwards.
        sdk_install.uninstall(config.PACKAGE_NAME, foldered_name)
        sdk_install.install(
            config.PACKAGE_NAME,
            foldered_name,
            config.DEFAULT_TASK_COUNT,
            additional_options={
                "service": {"name": foldered_name, "mesos_api_version": "V0"}
            }
        )
        sdk_tasks.check_running(foldered_name, config.DEFAULT_TASK_COUNT)
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, foldered_name)
        # reinstall the v1 version for the following tests
        sdk_install.install(
            config.PACKAGE_NAME,
            foldered_name,
            config.DEFAULT_TASK_COUNT,
            additional_options={"service": {"name": foldered_name}})
def test_metrics():
    """Wait until the node task emits the expected set of Cassandra metrics."""
    expected_metrics = [
        "org.apache.cassandra.metrics.Table.CoordinatorReadLatency.system.hints.p999",
        "org.apache.cassandra.metrics.Table.CompressionRatio.system_schema.indexes",
        "org.apache.cassandra.metrics.ThreadPools.ActiveTasks.internal.MemtableReclaimMemory",
    ]

    def expected_metrics_exist(emitted_metrics):
        # True once every entry of expected_metrics has been emitted.
        return sdk_metrics.check_metrics_presence(emitted_metrics, expected_metrics)

    sdk_metrics.wait_for_service_metrics(
        config.PACKAGE_NAME,
        config.get_foldered_service_name(),
        "node-0-server",
        config.DEFAULT_CASSANDRA_TIMEOUT,
        expected_metrics_exist,
    )
def check_secure_jmx_output(self_signed_trust_store, authentication):
    """Connect to a node's JMX port over TLS with jmxterm and verify expected beans and domains.

    self_signed_trust_store: use the service's own trust store instead of the JDK cacerts.
    authentication: accepted for API symmetry with callers; not consulted here.
    """
    foldered_name = config.get_foldered_service_name()
    node_task_id_0 = sdk_tasks.get_task_ids(foldered_name)[0]
    install_jmxterm(task_id=node_task_id_0)
    generate_jmx_command_files(task_id=node_task_id_0)

    if self_signed_trust_store:
        trust_store = "$MESOS_SANDBOX/jmx/trust_store"
        trust_store_password = "******"
    else:
        trust_store = "$JAVA_HOME/lib/security/cacerts"
        trust_store_password = "******"

    cmd = (
        "export JAVA_HOME=$(ls -d $MESOS_SANDBOX/jdk*/) && "
        "$JAVA_HOME/bin/java "
        "-Duser.home=$MESOS_SANDBOX "
        "-Djdk.tls.client.protocols=TLSv1.2 "
        "-Djavax.net.ssl.trustStore={trust_store} "
        "-Djavax.net.ssl.trustStorePassword={trust_store_password} "
        "-Djavax.net.ssl.keyStore=$MESOS_SANDBOX/jmx/key_store -Djavax.net.ssl.keyStorePassword=deleteme "
        "-Djavax.net.ssl.trustStoreType=JKS -Djavax.net.ssl.keyStoreType=JKS -jar jmxterm-1.0.1-uber.jar "
        "-l service:jmx:rmi:///jndi/rmi://$MESOS_CONTAINER_IP:7199/jmxrmi -u admin -p adminpassword "
        "-s -v silent -n".format(trust_store=trust_store, trust_store_password=trust_store_password))

    input_jmx_commands = " < jmx_beans_command.txt"
    full_cmd = "bash -c '{}{}'".format(cmd, input_jmx_commands)
    _, output, _ = sdk_cmd.run_cli(
        "task exec {} {}".format(node_task_id_0, full_cmd), print_output=True)
    assert "org.apache.cassandra.net:type=FailureDetector" in output
    assert "org.apache.cassandra.net:type=Gossiper" in output

    input_jmx_commands = " < jmx_domains_command.txt"
    full_cmd = "bash -c '{}{}'".format(cmd, input_jmx_commands)
    # Fix: discard the unused return-code and stderr values (was `rc, ..., stderr`),
    # matching the convention used for the first invocation above.
    _, output, _ = sdk_cmd.run_cli(
        "task exec {} {}".format(node_task_id_0, full_cmd), print_output=True)
    assert "org.apache.cassandra.metrics" in output
    assert "org.apache.cassandra.service" in output
def test_backup_and_restore_to_s3():
    """Back up the test keyspaces to S3 and restore them."""
    key_id = os.getenv('AWS_ACCESS_KEY_ID')
    if not key_id:
        assert False, 'AWS credentials are required for this test. Disable test with e.g. TEST_TYPES="sanity and not aws"'

    plan_parameters = {
        'AWS_ACCESS_KEY_ID': key_id,
        'AWS_SECRET_ACCESS_KEY': os.getenv('AWS_SECRET_ACCESS_KEY'),
        'AWS_REGION': os.getenv('AWS_REGION', 'us-west-2'),
        'S3_BUCKET_NAME': os.getenv('AWS_BUCKET_NAME', 'infinity-framework-test'),
        'SNAPSHOT_NAME': str(uuid.uuid1()),
        'CASSANDRA_KEYSPACES': '"testspace1 testspace2"',
    }
    config.run_backup_and_restore(
        config.get_foldered_service_name(),
        'backup-s3',
        'restore-s3',
        plan_parameters,
        config.get_foldered_node_address())
def test_backup_and_restore_to_s3():
    """Back up the 'system' keyspace to S3 and restore it (MDS credential variant)."""
    # Fix: removed leftover debugging statements (a print and traceback.print_stack())
    # that polluted the log on every run and served no assertion purpose.
    key_id = os.getenv('MDS_AWS_ACCESS_ID')
    if not key_id:
        assert False, 'AWS credentials are required for this test. Disable test with e.g. TEST_TYPES="sanity and not aws"'
    plan_parameters = {
        'AWS_ACCESS_KEY_ID': key_id,
        'AWS_SECRET_ACCESS_KEY': os.getenv('MDS_AWS_ACCOUNT_KEY'),
        'AWS_REGION': 'us-west-2',
        'external_location': 'shaugupt245-backup',
        'backup_name': str(uuid.uuid1()),
        'CASSANDRA_KEYSPACES': '"system"',
    }
    config.run_backup_and_restore(
        config.get_foldered_service_name(),
        'backup-s3',
        'restore-s3',
        plan_parameters,
        config.get_foldered_node_address())
def test_backup_and_restore_to_azure():
    """Back up the test keyspaces to Azure blob storage and restore them.

    Requires Azure service-principal credentials in the environment; fails fast otherwise.
    """
    client_id = os.getenv('AZURE_CLIENT_ID')
    if not client_id:
        assert False, 'Azure credentials are required for this test. Disable test with e.g. TEST_TYPES="sanity and not azure"'
    # Plan parameters forwarded to the backup-azure/restore-azure plans.
    plan_parameters = {
        'AZURE_CLIENT_ID': client_id,
        'AZURE_CLIENT_SECRET': os.getenv('AZURE_CLIENT_SECRET'),
        'AZURE_TENANT_ID': os.getenv('AZURE_TENANT_ID'),
        'AZURE_STORAGE_ACCOUNT': os.getenv('AZURE_STORAGE_ACCOUNT'),
        'AZURE_STORAGE_KEY': os.getenv('AZURE_STORAGE_KEY'),
        'CONTAINER_NAME': os.getenv('CONTAINER_NAME', 'cassandra-test'),
        'SNAPSHOT_NAME': str(uuid.uuid1()),
        'CASSANDRA_KEYSPACES': '"testspace1 testspace2"',
    }
    # NOTE(review): sibling tests call config.run_backup_and_restore; confirm that a bare
    # run_backup_and_restore is actually imported/defined in this module.
    run_backup_and_restore(config.get_foldered_service_name(), 'backup-azure', 'restore-azure', plan_parameters, config.get_foldered_node_address())
def test_config_update_across_restart():
    """Set a custom batch_size_warn_threshold_in_kb and confirm it survives repeated pod restarts."""
    foldered_service_name = config.get_foldered_service_name()
    batch_size_warn_threshold_in_kb = 15

    sdk_upgrade.update_or_upgrade_or_downgrade(
        config.PACKAGE_NAME,
        foldered_service_name,
        to_package_version=None,
        additional_options={
            "service": {"name": foldered_service_name},
            "cassandra": {"batch_size_warn_threshold_in_kb": batch_size_warn_threshold_in_kb},
        },
        expected_running_tasks=config.DEFAULT_TASK_COUNT,
        wait_for_deployment=True,
        timeout_seconds=config.DEFAULT_CASSANDRA_TIMEOUT,
    )

    # Restart the pod several times; the custom setting must persist each time.
    for _ in range(3):
        cmd_list = ["pod", "restart", "node-0"]
        sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_service_name, " ".join(cmd_list))

        sdk_plan.wait_for_kicked_off_recovery(foldered_service_name)
        sdk_plan.wait_for_completed_recovery(
            foldered_service_name, timeout_seconds=config.DEFAULT_CASSANDRA_TIMEOUT
        )

        _, stdout, _ = sdk_cmd.service_task_exec(foldered_service_name, "node-0-server", "env")

        envvar = "CASSANDRA_BATCH_SIZE_WARN_THRESHOLD_IN_KB="
        envvar_pos = stdout.find(envvar)
        if envvar_pos < 0:
            raise Exception("Required envvar not found")

        # Fix: compare the complete value up to end-of-line instead of a prefix
        # match. The old startswith("15") check would incorrectly accept values
        # such as "150" or "1500".
        value_start = envvar_pos + len(envvar)
        found_string = stdout[value_start:].split("\n", 1)[0].strip()
        if found_string != str(batch_size_warn_threshold_in_kb):
            log.error(
                "Looking for %s%d but found: %s",
                envvar,
                batch_size_warn_threshold_in_kb,
                found_string,
            )
            raise Exception("Envvar not set to required value")
def test_backup_and_restore_to_s3():
    """Exercise the backup-s3 and restore-s3 plans against the foldered service."""
    key_id = os.getenv('AWS_ACCESS_KEY_ID')
    if not key_id:
        assert False, 'AWS credentials are required for this test. Disable test with e.g. TEST_TYPES="sanity and not aws"'

    snapshot_name = str(uuid.uuid1())
    plan_parameters = {
        'AWS_ACCESS_KEY_ID': key_id,
        'AWS_SECRET_ACCESS_KEY': os.getenv('AWS_SECRET_ACCESS_KEY'),
        'AWS_REGION': os.getenv('AWS_REGION', 'us-west-2'),
        'S3_BUCKET_NAME': os.getenv('AWS_BUCKET_NAME', 'infinity-framework-test'),
        'SNAPSHOT_NAME': snapshot_name,
        'CASSANDRA_KEYSPACES': '"testspace1 testspace2"',
    }
    config.run_backup_and_restore(
        config.get_foldered_service_name(),
        'backup-s3',
        'restore-s3',
        plan_parameters,
        config.get_foldered_node_address())
def test_backup_and_restore_to_azure():
    """Exercise the backup-azure and restore-azure plans against the foldered service."""
    client_id = os.getenv('AZURE_CLIENT_ID')
    if not client_id:
        assert False, 'Azure credentials are required for this test. Disable test with e.g. TEST_TYPES="sanity and not azure"'

    plan_parameters = {
        'CLIENT_ID': client_id,
        'CLIENT_SECRET': os.getenv('AZURE_CLIENT_SECRET'),
        'TENANT_ID': os.getenv('AZURE_TENANT_ID'),
        'AZURE_STORAGE_ACCOUNT': os.getenv('AZURE_STORAGE_ACCOUNT'),
        'AZURE_STORAGE_KEY': os.getenv('AZURE_STORAGE_KEY'),
        'CONTAINER_NAME': os.getenv('CONTAINER_NAME', 'cassandra-test'),
        'SNAPSHOT_NAME': str(uuid.uuid1()),
        'CASSANDRA_KEYSPACES': '"testspace1 testspace2"',
    }
    config.run_backup_and_restore(
        config.get_foldered_service_name(),
        'backup-azure',
        'restore-azure',
        plan_parameters,
        config.get_foldered_node_address())
def test_backup_and_restore_to_s3() -> None:
    """Run the S3 backup plan followed by the restore plan."""
    key_id = os.getenv("AWS_ACCESS_KEY_ID")
    if not key_id:
        assert (
            False
        ), 'AWS credentials are required for this test. Disable test with e.g. TEST_TYPES="sanity and not aws"'

    parameters = {
        "AWS_ACCESS_KEY_ID": key_id,
        "AWS_SECRET_ACCESS_KEY": os.getenv("AWS_SECRET_ACCESS_KEY"),
        "AWS_REGION": os.getenv("AWS_REGION", "us-west-2"),
        "S3_BUCKET_NAME": os.getenv("AWS_BUCKET_NAME", "infinity-framework-test"),
        "SNAPSHOT_NAME": str(uuid.uuid1()),
        "CASSANDRA_KEYSPACES": '"testspace1 testspace2"',
    }
    config.run_backup_and_restore(
        config.get_foldered_service_name(),
        "backup-s3",
        "restore-s3",
        parameters,
        config.get_foldered_node_address(),
    )
def test_backup_and_restore_to_azure() -> None:
    """Run the Azure backup plan followed by the restore plan."""
    client_id = os.getenv("AZURE_CLIENT_ID")
    if not client_id:
        assert (
            False
        ), 'Azure credentials are required for this test. Disable test with e.g. TEST_TYPES="sanity and not azure"'

    parameters = {
        "CLIENT_ID": client_id,
        "CLIENT_SECRET": os.getenv("AZURE_CLIENT_SECRET"),
        "TENANT_ID": os.getenv("AZURE_TENANT_ID"),
        "AZURE_STORAGE_ACCOUNT": os.getenv("AZURE_STORAGE_ACCOUNT"),
        "AZURE_STORAGE_KEY": os.getenv("AZURE_STORAGE_KEY"),
        "CONTAINER_NAME": os.getenv("CONTAINER_NAME", "cassandra-test"),
        "SNAPSHOT_NAME": str(uuid.uuid1()),
        "CASSANDRA_KEYSPACES": '"testspace1 testspace2"',
    }
    config.run_backup_and_restore(
        config.get_foldered_service_name(),
        "backup-azure",
        "restore-azure",
        parameters,
        config.get_foldered_node_address(),
    )
def test_service_health():
    """The foldered service should report healthy."""
    service_name = config.get_foldered_service_name()
    assert shakedown.service_healthy(service_name)
def install_jmx_configured_cassandra(self_signed_trust_store: bool = True, authentication: bool = True):
    """Install the foldered Cassandra service with JMX enabled.

    self_signed_trust_store: also configure the service's own trust store (vs JDK cacerts).
    authentication: additionally enable superuser authentication + authorization,
        creating the required password secret.

    Also (re)installs the matching set of test jobs before the service install.
    """
    foldered_name = config.get_foldered_service_name()
    test_jobs: List[Dict[str, Any]] = []
    # Auth mode needs job variants that log in; pick the job set accordingly.
    if authentication:
        test_jobs = config.get_all_jobs(node_address=config.get_foldered_node_address(), auth=True)
    else:
        test_jobs = config.get_all_jobs(node_address=config.get_foldered_node_address())
    # destroy/reinstall any prior leftover jobs, so that they don't touch the newly installed service:
    for job in test_jobs:
        sdk_jobs.install_job(job)
    sdk_install.uninstall(config.PACKAGE_NAME, foldered_name)
    install_jmx_secrets()
    # Base options: JMX enabled with the secret-backed credential/keystore files.
    service_options = {
        "service": {
            "name": foldered_name,
            "jmx": {
                "enabled": True,
                "rmi_port": 31198,
                "password_file": PASSWORD_FILE,
                "access_file": ACCESS_FILE,
                "key_store": KEY_STORE,
                "key_store_password_file": KEY_STORE_PASS,
            },
        }
    }
    # Layer the trust-store options on top; merge_dictionaries(a, b) — b's values
    # are merged into a's here, preserving the base options above.
    if self_signed_trust_store:
        service_options = sdk_utils.merge_dictionaries(
            {
                "service": {
                    "jmx": {
                        "add_trust_store": True,
                        "trust_store": TRUST_STORE,
                        "trust_store_password_file": TRUST_STORE_PASS,
                    }
                }
            },
            service_options,
        )
    # Optionally layer on authentication/authorization, backed by a freshly created secret.
    if authentication:
        secret_path = foldered_name + "/" + config.SECRET_VALUE
        create_secret(secret_value=config.SECRET_VALUE, secret_path=secret_path)
        service_options = sdk_utils.merge_dictionaries(
            {
                "service": {
                    "security": {
                        "authentication": {
                            "enabled": True,
                            "superuser": {
                                "password_secret_path": secret_path
                            },
                        },
                        "authorization": {
                            "enabled": True
                        },
                    }
                }
            },
            service_options,
        )
    sdk_install.install(
        config.PACKAGE_NAME,
        foldered_name,
        config.DEFAULT_TASK_COUNT,
        additional_options=service_options,
    )
def test_mesos_v0_api():
    """Flip the scheduler to the Mesos V0 API and back, verifying the setting can be toggled."""
    service_name = config.get_foldered_service_name()
    prior_api_version = sdk_marathon.get_mesos_api_version(service_name)
    # Fix: compare strings with != rather than `is not`, which tests object
    # identity and only worked by accident of CPython string interning
    # (CPython emits a SyntaxWarning for `is` with a literal).
    if prior_api_version != "V0":
        sdk_marathon.set_mesos_api_version(service_name, "V0")
        sdk_marathon.set_mesos_api_version(service_name, prior_api_version)