def test_backup_and_restore_to_s3_with_jmx_with_auth():
    """Run an authenticated S3 backup/restore cycle, then uninstall the service
    and remove its auth-enabled test jobs from metronome.

    Fails fast when AWS_ACCESS_KEY_ID is not set in the environment.
    """
    key_id = os.getenv("AWS_ACCESS_KEY_ID")
    if not key_id:
        assert (
            False
        ), 'AWS credentials are required for this test. Disable test with e.g. TEST_TYPES="sanity and not aws"'
    plan_parameters = {
        "AWS_ACCESS_KEY_ID": key_id,
        "AWS_SECRET_ACCESS_KEY": os.getenv("AWS_SECRET_ACCESS_KEY"),
        "AWS_REGION": os.getenv("AWS_REGION", "us-west-2"),
        "S3_BUCKET_NAME": os.getenv("AWS_BUCKET_NAME", "infinity-framework-test"),
        "SNAPSHOT_NAME": str(uuid.uuid1()),
        "CASSANDRA_KEYSPACES": '"testspace1 testspace2"',
    }
    config.run_backup_and_restore_with_auth(
        config.get_foldered_service_name(),
        "backup-s3",
        "restore-s3",
        plan_parameters,
        config.get_foldered_node_address(),
    )
    sdk_install.uninstall(config.PACKAGE_NAME, config.get_foldered_service_name())
    # Fetch the auth-enabled job definitions and remove them from metronome.
    # (The original initialized test_jobs to an empty list and immediately
    # overwrote it; that dead assignment has been dropped.)
    test_jobs: List[Dict[str, Any]] = config.get_all_jobs(
        node_address=config.get_foldered_node_address(), auth=True
    )
    for job in test_jobs:
        sdk_jobs.remove_job(job)
def test_udf() -> None:
    """Enable Cassandra user-defined functions via a configuration update and
    verify a client can write and read a UDF; always remove the test jobs."""
    test_jobs: List[Dict[str, Any]] = []
    try:
        test_jobs = config.get_udf_jobs(node_address=config.get_foldered_node_address())
        # destroy/reinstall any prior leftover jobs, so that they don't touch the newly installed service:
        for test_job in test_jobs:
            sdk_jobs.install_job(test_job)
        udf_settings = {
            "cassandra": {
                "enable_user_defined_functions": True,
                "enable_scripted_user_defined_functions": True,
            }
        }
        sdk_service.update_configuration(
            config.PACKAGE_NAME,
            config.get_foldered_service_name(),
            udf_settings,
            config.DEFAULT_TASK_COUNT,
        )
        config.verify_client_can_write_read_udf(config.get_foldered_node_address())
    finally:
        # remove job definitions from metronome
        for test_job in test_jobs:
            sdk_jobs.remove_job(test_job)
def test_repair_cleanup_plans_complete():
    """Run the 'cleanup' and 'repair' plans against testspace1 and wait for
    each to reach completion."""
    plan_params = {'CASSANDRA_KEYSPACE': 'testspace1'}
    service_name = config.get_foldered_service_name()
    node_address = config.get_foldered_node_address()
    # populate 'testspace1' for test, then delete afterwards:
    with sdk_jobs.RunJobContext(
            before_jobs=[
                config.get_write_data_job(node_address=node_address),
                config.get_verify_data_job(node_address=node_address),
            ],
            after_jobs=[
                config.get_delete_data_job(node_address=node_address),
                config.get_verify_deletion_job(node_address=node_address),
            ]):
        # cleanup first, then repair — same order as the original.
        for plan_name in ('cleanup', 'repair'):
            sdk_plan.start_plan(service_name, plan_name, parameters=plan_params)
            sdk_plan.wait_for_completed_plan(service_name, plan_name)
def configure_package(configure_security):
    """Fixture: reinstall the foldered Cassandra service and its test jobs,
    tearing both down after the test session."""
    test_jobs = []
    try:
        test_jobs = config.get_all_jobs(node_address=config.get_foldered_node_address())
        sdk_install.uninstall(config.get_foldered_service_name(), package_name=config.PACKAGE_NAME)
        # user=root because Azure CLI needs to run in root...
        service_options = {
            "service": {
                "name": config.get_foldered_service_name(),
                "user": "******",
            }
        }
        sdk_install.install(
            config.PACKAGE_NAME,
            config.DEFAULT_TASK_COUNT,
            service_name=config.get_foldered_service_name(),
            additional_options=service_options,
        )
        job_tmp_dir = tempfile.mkdtemp(prefix='cassandra-test')
        for test_job in test_jobs:
            sdk_jobs.install_job(test_job, tmp_dir=job_tmp_dir)
        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.get_foldered_service_name(), package_name=config.PACKAGE_NAME)
        # remove job definitions from metronome
        for test_job in test_jobs:
            sdk_jobs.remove_job(test_job)
def configure_package(configure_security):
    """Fixture: upgrade-test the foldered service from the beta package,
    install the test jobs, and clean everything up afterwards."""
    test_jobs = []
    try:
        test_jobs = config.get_all_jobs(node_address=config.get_foldered_node_address())
        sdk_install.uninstall(config.get_foldered_service_name(), package_name=config.PACKAGE_NAME)
        sdk_upgrade.test_upgrade(
            "beta-{}".format(config.PACKAGE_NAME),
            config.PACKAGE_NAME,
            config.DEFAULT_TASK_COUNT,
            service_name=config.get_foldered_service_name(),
            additional_options={"service": {"name": config.get_foldered_service_name()}},
        )
        scratch_dir = tempfile.mkdtemp(prefix='cassandra-test')
        for test_job in test_jobs:
            sdk_jobs.install_job(test_job, tmp_dir=scratch_dir)
        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.get_foldered_service_name(), package_name=config.PACKAGE_NAME)
        for test_job in test_jobs:
            sdk_jobs.remove_job(test_job)
def configure_package(configure_security: None) -> Iterator[None]:
    """Fixture: freshly install the foldered service (as root outside strict
    mode) and remove the test job definitions afterwards."""
    test_jobs: List[Dict[str, Any]] = []
    try:
        test_jobs = config.get_all_jobs(node_address=config.get_foldered_node_address())
        # destroy/reinstall any prior leftover jobs, so that they don't touch the newly installed service:
        for test_job in test_jobs:
            sdk_jobs.install_job(test_job)
        sdk_install.uninstall(config.PACKAGE_NAME, config.get_foldered_service_name())
        # user=root because Azure CLI needs to run in root...
        # We don't run the Azure tests in strict however, so don't set it then.
        service_section = {"name": config.get_foldered_service_name()}
        if os.environ.get("SECURITY") != "strict":
            service_section["user"] = "******"
        sdk_install.install(
            config.PACKAGE_NAME,
            config.get_foldered_service_name(),
            config.DEFAULT_TASK_COUNT,
            additional_options={"service": service_section},
        )
        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.get_foldered_service_name())
        # remove job definitions from metronome
        for test_job in test_jobs:
            sdk_jobs.remove_job(test_job)
def configure_package(configure_security):
    """Fixture: reinstall leftover jobs, run the package upgrade test on a
    fresh service, and tear everything down afterwards."""
    test_jobs = []
    try:
        test_jobs = config.get_all_jobs(node_address=config.get_foldered_node_address())
        # destroy/reinstall any prior leftover jobs, so that they don't touch the newly installed service:
        for stale_job in test_jobs:
            sdk_jobs.install_job(stale_job)
        sdk_install.uninstall(config.PACKAGE_NAME, config.get_foldered_service_name())
        upgrade_options = {"service": {"name": config.get_foldered_service_name()}}
        sdk_upgrade.test_upgrade(
            config.PACKAGE_NAME,
            config.get_foldered_service_name(),
            config.DEFAULT_TASK_COUNT,
            additional_options=upgrade_options,
        )
        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.get_foldered_service_name())
        for stale_job in test_jobs:
            sdk_jobs.remove_job(stale_job)
def configure_package(configure_security):
    """Fixture: fresh install of the foldered service; the service user is set
    to root only outside strict mode (Azure tests are skipped in strict)."""
    test_jobs = []
    try:
        test_jobs = config.get_all_jobs(node_address=config.get_foldered_node_address())
        # destroy/reinstall any prior leftover jobs, so that they don't touch the newly installed service:
        for leftover_job in test_jobs:
            sdk_jobs.install_job(leftover_job)
        sdk_install.uninstall(config.PACKAGE_NAME, config.get_foldered_service_name())
        # user=root because Azure CLI needs to run in root...
        # We don't run the Azure tests in strict however, so don't set it then.
        strict_mode = os.environ.get("SECURITY") == "strict"
        if strict_mode:
            install_options = {"service": {"name": config.get_foldered_service_name()}}
        else:
            install_options = {
                "service": {"name": config.get_foldered_service_name(), "user": "******"}
            }
        sdk_install.install(
            config.PACKAGE_NAME,
            config.get_foldered_service_name(),
            config.DEFAULT_TASK_COUNT,
            additional_options=install_options,
        )
        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.get_foldered_service_name())
        # remove job definitions from metronome
        for leftover_job in test_jobs:
            sdk_jobs.remove_job(leftover_job)
def configure_package(configure_security):
    """Fixture: remove stale jobs, upgrade-test the service, install the test
    jobs, and clean everything up after the session."""
    test_jobs = []
    try:
        test_jobs = config.get_all_jobs(node_address=config.get_foldered_node_address())
        # destroy any leftover jobs first, so that they don't touch the newly installed service:
        for stale_job in test_jobs:
            sdk_jobs.remove_job(stale_job)
        sdk_install.uninstall(config.PACKAGE_NAME, config.get_foldered_service_name())
        sdk_upgrade.test_upgrade(
            config.PACKAGE_NAME,
            config.get_foldered_service_name(),
            config.DEFAULT_TASK_COUNT,
            additional_options={"service": {"name": config.get_foldered_service_name()}},
        )
        scratch_dir = tempfile.mkdtemp(prefix='cassandra-test')
        for stale_job in test_jobs:
            sdk_jobs.install_job(stale_job, tmp_dir=scratch_dir)
        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.get_foldered_service_name())
        for stale_job in test_jobs:
            sdk_jobs.remove_job(stale_job)
def test_backup_and_restore_to_s3():
    """Back up to S3 and restore, using AWS credentials from the environment."""
    key_id = os.getenv('AWS_ACCESS_KEY_ID')
    if not key_id:
        assert False, 'AWS credentials are required for this test. Disable test with e.g. TEST_TYPES="sanity and not aws"'
    backup_parameters = {
        'AWS_ACCESS_KEY_ID': key_id,
        'AWS_SECRET_ACCESS_KEY': os.getenv('AWS_SECRET_ACCESS_KEY'),
        'AWS_REGION': os.getenv('AWS_REGION', 'us-west-2'),
        'S3_BUCKET_NAME': os.getenv('AWS_BUCKET_NAME', 'infinity-framework-test'),
        'SNAPSHOT_NAME': str(uuid.uuid1()),
        'CASSANDRA_KEYSPACES': '"testspace1 testspace2"',
    }
    service_name = config.get_foldered_service_name()
    node_address = config.get_foldered_node_address()
    config.run_backup_and_restore(service_name, 'backup-s3', 'restore-s3',
                                  backup_parameters, node_address)
def test_backup_and_restore_to_azure():
    """Back up to Azure storage and restore, using credentials from the environment."""
    client_id = os.getenv('AZURE_CLIENT_ID')
    if not client_id:
        assert False, 'Azure credentials are required for this test. Disable test with e.g. TEST_TYPES="sanity and not azure"'
    plan_parameters = {
        'AZURE_CLIENT_ID': client_id,
        'AZURE_CLIENT_SECRET': os.getenv('AZURE_CLIENT_SECRET'),
        'AZURE_TENANT_ID': os.getenv('AZURE_TENANT_ID'),
        'AZURE_STORAGE_ACCOUNT': os.getenv('AZURE_STORAGE_ACCOUNT'),
        'AZURE_STORAGE_KEY': os.getenv('AZURE_STORAGE_KEY'),
        'CONTAINER_NAME': os.getenv('CONTAINER_NAME', 'cassandra-test'),
        'SNAPSHOT_NAME': str(uuid.uuid1()),
        'CASSANDRA_KEYSPACES': '"testspace1 testspace2"',
    }
    # Qualified via config for consistency with the sibling backup tests, which
    # all call config.run_backup_and_restore; the bare name is not defined
    # anywhere in this module's visible scope (confirm against the file's
    # import block if it aliased the helper directly).
    config.run_backup_and_restore(config.get_foldered_service_name(), 'backup-azure',
                                  'restore-azure', plan_parameters,
                                  config.get_foldered_node_address())
def test_backup_and_restore_to_s3():
    """Back up the 'system' keyspace to S3 and restore it.

    Credentials come from the MDS_* environment variables; fails fast when
    MDS_AWS_ACCESS_ID is unset.
    """
    # Leftover debug statements (print("MDS debugging..") and
    # traceback.print_stack()) have been removed — they only polluted stdout.
    key_id = os.getenv('MDS_AWS_ACCESS_ID')
    if not key_id:
        assert False, 'AWS credentials are required for this test. Disable test with e.g. TEST_TYPES="sanity and not aws"'
    plan_parameters = {
        'AWS_ACCESS_KEY_ID': key_id,
        'AWS_SECRET_ACCESS_KEY': os.getenv('MDS_AWS_ACCOUNT_KEY'),
        'AWS_REGION': 'us-west-2',
        # NOTE(review): bucket name is hard-coded to a personal-looking value —
        # consider parameterizing via an environment variable.
        'external_location': 'shaugupt245-backup',
        'backup_name': str(uuid.uuid1()),
        'CASSANDRA_KEYSPACES': '"system"',
    }
    config.run_backup_and_restore(config.get_foldered_service_name(), 'backup-s3',
                                  'restore-s3', plan_parameters,
                                  config.get_foldered_node_address())
def configure_package(configure_security):
    """Fixture: fresh install of the foldered service (root user unless strict
    mode), plus install/removal of the test jobs around the session."""
    test_jobs = []
    try:
        test_jobs = config.get_all_jobs(node_address=config.get_foldered_node_address())
        # destroy any leftover jobs first, so that they don't touch the newly installed service:
        for old_job in test_jobs:
            sdk_jobs.remove_job(old_job)
        sdk_install.uninstall(config.PACKAGE_NAME, config.get_foldered_service_name())
        # user=root because Azure CLI needs to run in root...
        # We don't run the Azure tests in strict however, so don't set it then.
        service_settings = {"name": config.get_foldered_service_name()}
        if os.environ.get("SECURITY") != "strict":
            service_settings["user"] = "******"
        sdk_install.install(
            config.PACKAGE_NAME,
            config.get_foldered_service_name(),
            config.DEFAULT_TASK_COUNT,
            additional_options={"service": service_settings},
        )
        scratch_dir = tempfile.mkdtemp(prefix='cassandra-test')
        for old_job in test_jobs:
            sdk_jobs.install_job(old_job, tmp_dir=scratch_dir)
        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.get_foldered_service_name())
        # remove job definitions from metronome
        for old_job in test_jobs:
            sdk_jobs.remove_job(old_job)
def test_backup_and_restore_to_s3():
    """Exercise the backup-s3 / restore-s3 plans with AWS credentials taken
    from the environment."""
    access_key = os.getenv('AWS_ACCESS_KEY_ID')
    if not access_key:
        assert False, 'AWS credentials are required for this test. Disable test with e.g. TEST_TYPES="sanity and not aws"'
    s3_parameters = {
        'AWS_ACCESS_KEY_ID': access_key,
        'AWS_SECRET_ACCESS_KEY': os.getenv('AWS_SECRET_ACCESS_KEY'),
        'AWS_REGION': os.getenv('AWS_REGION', 'us-west-2'),
        'S3_BUCKET_NAME': os.getenv('AWS_BUCKET_NAME', 'infinity-framework-test'),
        'SNAPSHOT_NAME': str(uuid.uuid1()),
        'CASSANDRA_KEYSPACES': '"testspace1 testspace2"',
    }
    config.run_backup_and_restore(
        config.get_foldered_service_name(),
        'backup-s3',
        'restore-s3',
        s3_parameters,
        config.get_foldered_node_address(),
    )
def test_backup_and_restore_to_azure():
    """Exercise the backup-azure / restore-azure plans with Azure credentials
    taken from the environment."""
    client_id = os.getenv('AZURE_CLIENT_ID')
    if not client_id:
        assert False, 'Azure credentials are required for this test. Disable test with e.g. TEST_TYPES="sanity and not azure"'
    azure_parameters = {
        'CLIENT_ID': client_id,
        'CLIENT_SECRET': os.getenv('AZURE_CLIENT_SECRET'),
        'TENANT_ID': os.getenv('AZURE_TENANT_ID'),
        'AZURE_STORAGE_ACCOUNT': os.getenv('AZURE_STORAGE_ACCOUNT'),
        'AZURE_STORAGE_KEY': os.getenv('AZURE_STORAGE_KEY'),
        'CONTAINER_NAME': os.getenv('CONTAINER_NAME', 'cassandra-test'),
        'SNAPSHOT_NAME': str(uuid.uuid1()),
        'CASSANDRA_KEYSPACES': '"testspace1 testspace2"',
    }
    config.run_backup_and_restore(
        config.get_foldered_service_name(),
        'backup-azure',
        'restore-azure',
        azure_parameters,
        config.get_foldered_node_address(),
    )
def test_backup_and_restore_to_s3() -> None:
    """Run an S3 backup followed by a restore against the foldered service."""
    key_id = os.getenv("AWS_ACCESS_KEY_ID")
    # Fail fast when AWS credentials are absent.
    assert (
        key_id
    ), 'AWS credentials are required for this test. Disable test with e.g. TEST_TYPES="sanity and not aws"'
    plan_parameters = {
        "AWS_ACCESS_KEY_ID": key_id,
        "AWS_SECRET_ACCESS_KEY": os.getenv("AWS_SECRET_ACCESS_KEY"),
        "AWS_REGION": os.getenv("AWS_REGION", "us-west-2"),
        "S3_BUCKET_NAME": os.getenv("AWS_BUCKET_NAME", "infinity-framework-test"),
        "SNAPSHOT_NAME": str(uuid.uuid1()),
        "CASSANDRA_KEYSPACES": '"testspace1 testspace2"',
    }
    config.run_backup_and_restore(
        config.get_foldered_service_name(),
        "backup-s3",
        "restore-s3",
        plan_parameters,
        config.get_foldered_node_address(),
    )
def test_backup_and_restore_to_azure() -> None:
    """Run an Azure backup followed by a restore against the foldered service."""
    client_id = os.getenv("AZURE_CLIENT_ID")
    # Fail fast when Azure credentials are absent.
    assert (
        client_id
    ), 'Azure credentials are required for this test. Disable test with e.g. TEST_TYPES="sanity and not azure"'
    plan_parameters = {
        "CLIENT_ID": client_id,
        "CLIENT_SECRET": os.getenv("AZURE_CLIENT_SECRET"),
        "TENANT_ID": os.getenv("AZURE_TENANT_ID"),
        "AZURE_STORAGE_ACCOUNT": os.getenv("AZURE_STORAGE_ACCOUNT"),
        "AZURE_STORAGE_KEY": os.getenv("AZURE_STORAGE_KEY"),
        "CONTAINER_NAME": os.getenv("CONTAINER_NAME", "cassandra-test"),
        "SNAPSHOT_NAME": str(uuid.uuid1()),
        "CASSANDRA_KEYSPACES": '"testspace1 testspace2"',
    }
    config.run_backup_and_restore(
        config.get_foldered_service_name(),
        "backup-azure",
        "restore-azure",
        plan_parameters,
        config.get_foldered_node_address(),
    )
def test_auth() -> None:
    """Verify an authenticated client can write, read, and delete data."""
    node_address = config.get_foldered_node_address()
    config.verify_client_can_write_read_and_delete_with_auth(node_address)
def test_backup_and_restore_to_s3_compatible_storage() -> None:
    """Back up to and restore from an S3-compatible (minio) endpoint reached
    through marathon-lb.

    Temporarily replaces the AWS credential environment variables with minio's
    and restores the originals on exit.
    """
    # Snapshot the real AWS credentials BEFORE the try block so the finally
    # clause can always restore them. (Originally these were assigned inside
    # the try after the minio install: a failure there left them undefined and
    # the finally clause raised NameError instead of the real error.)
    temp_key_id = os.getenv("AWS_ACCESS_KEY_ID")
    temp_secret_access_key = os.getenv("AWS_SECRET_ACCESS_KEY")
    try:
        sdk_install.install(
            "minio",
            "minio",
            expected_running_tasks=0,
            package_version="0.0.13-RELEASE.2018-10-06T00-15-16Z",
            wait_for_deployment=False,
        )
        if not temp_key_id:
            assert (
                False
            ), 'AWS credentials are required for this test. Disable test with e.g. TEST_TYPES="sanity and not aws"'
        options = ""
        if sdk_utils.is_strict_mode():
            # Strict mode: marathon-lb needs a service account with read access
            # to marathon services and its event stream.
            sdk_security.create_service_account(
                service_account_name="marathon-lb-sa",
                service_account_secret="marathon-lb/service-account-secret",
            )
            sdk_cmd.run_cli(
                "security org users grant marathon-lb-sa dcos:service:marathon:marathon:services:/ read"
            )
            sdk_cmd.run_cli(
                'security org users grant marathon-lb-sa dcos:service:marathon:marathon:admin:events read --description "Allows access to Marathon events"'
            )
            options = {
                "marathon-lb": {
                    "secret_name": "marathon-lb/service-account-secret",
                    "marathon-uri": "https://marathon.mesos:8443",
                }
            }
        sdk_install.install(
            "marathon-lb",
            "marathon-lb",
            expected_running_tasks=0,
            additional_options=options,
            package_version="1.14.0",
            wait_for_deployment=False,
        )
        # Discover the public IP of the node running marathon-lb; minio listens
        # behind it on port 9000.
        host = sdk_marathon.get_scheduler_host("marathon-lb")
        _, public_node_ip, _ = sdk_cmd.agent_ssh(host, "curl -s ifconfig.co")
        minio_endpoint_url = "http://" + public_node_ip + ":9000"
        # Point the aws CLI (and the backup plan) at minio's credentials.
        os.environ["AWS_ACCESS_KEY_ID"] = config.MINIO_AWS_ACCESS_KEY_ID
        os.environ["AWS_SECRET_ACCESS_KEY"] = config.MINIO_AWS_SECRET_ACCESS_KEY
        # NOTE(review): subprocess.run is not checked (no check=True); a failed
        # bucket creation only surfaces later when the backup plan runs.
        subprocess.run(
            [
                "aws",
                "s3",
                "mb",
                "s3://" + config.MINIO_BUCKET_NAME,
                "--endpoint",
                minio_endpoint_url,
            ]
        )
        plan_parameters = {
            "AWS_ACCESS_KEY_ID": os.getenv("AWS_ACCESS_KEY_ID"),
            "AWS_SECRET_ACCESS_KEY": os.getenv("AWS_SECRET_ACCESS_KEY"),
            "AWS_REGION": os.getenv("AWS_REGION", "us-west-2"),
            "S3_BUCKET_NAME": config.MINIO_BUCKET_NAME,
            "SNAPSHOT_NAME": str(uuid.uuid1()),
            "CASSANDRA_KEYSPACES": '"testspace1 testspace2"',
            "S3_ENDPOINT_URL": minio_endpoint_url,
        }
        config.run_backup_and_restore(
            config.get_foldered_service_name(),
            "backup-s3",
            "restore-s3",
            plan_parameters,
            config.get_foldered_node_address(),
        )
    finally:
        sdk_install.uninstall("minio", "minio")
        sdk_install.uninstall("marathon-lb", "marathon-lb")
        # Restore only values that actually existed: os.environ[...] = None
        # raises TypeError (environment values must be strings).
        if temp_key_id is not None:
            os.environ["AWS_ACCESS_KEY_ID"] = temp_key_id
        if temp_secret_access_key is not None:
            os.environ["AWS_SECRET_ACCESS_KEY"] = temp_secret_access_key
def install_jmx_configured_cassandra(self_signed_trust_store: bool = True, authentication: bool = True):
    """Reinstall the foldered Cassandra service with JMX enabled.

    Layers service options in three steps: base JMX settings, then (optionally)
    a self-signed trust store, then (optionally) authentication/authorization
    backed by a superuser-password secret. Also (re)installs the matching test
    jobs before tearing down any prior service instance.

    Args:
        self_signed_trust_store: when True, merge trust-store JMX options into
            the service config.
        authentication: when True, fetch auth-enabled test jobs, create the
            superuser password secret, and enable authentication/authorization.
    """
    foldered_name = config.get_foldered_service_name()
    # NOTE: initialized empty, then unconditionally reassigned below — the
    # initializer only serves the type annotation.
    test_jobs: List[Dict[str, Any]] = []
    if authentication:
        test_jobs = config.get_all_jobs(
            node_address=config.get_foldered_node_address(), auth=True)
    else:
        test_jobs = config.get_all_jobs(
            node_address=config.get_foldered_node_address())
    # destroy/reinstall any prior leftover jobs, so that they don't touch the newly installed service:
    for job in test_jobs:
        sdk_jobs.install_job(job)
    sdk_install.uninstall(config.PACKAGE_NAME, foldered_name)
    # Provision JMX credential/keystore secrets before the service references them.
    install_jmx_secrets()
    # Base options: enable JMX with the module-level secret paths
    # (PASSWORD_FILE, ACCESS_FILE, KEY_STORE, KEY_STORE_PASS defined elsewhere
    # in this module).
    service_options = {
        "service": {
            "name": foldered_name,
            "jmx": {
                "enabled": True,
                "rmi_port": 31198,
                "password_file": PASSWORD_FILE,
                "access_file": ACCESS_FILE,
                "key_store": KEY_STORE,
                "key_store_password_file": KEY_STORE_PASS,
            },
        }
    }
    if self_signed_trust_store:
        # Merge trust-store settings on top. NOTE(review): which side wins on
        # key collision depends on sdk_utils.merge_dictionaries' precedence —
        # the trust-store dict is passed first, service_options second; confirm
        # precedence in sdk_utils before reordering these arguments.
        service_options = sdk_utils.merge_dictionaries(
            {
                "service": {
                    "jmx": {
                        "add_trust_store": True,
                        "trust_store": TRUST_STORE,
                        "trust_store_password_file": TRUST_STORE_PASS,
                    }
                }
            },
            service_options,
        )
    if authentication:
        # Store the superuser password in a secret scoped under the service name.
        secret_path = foldered_name + "/" + config.SECRET_VALUE
        create_secret(secret_value=config.SECRET_VALUE, secret_path=secret_path)
        # Same merge-precedence caveat as above applies here.
        service_options = sdk_utils.merge_dictionaries(
            {
                "service": {
                    "security": {
                        "authentication": {
                            "enabled": True,
                            "superuser": {
                                "password_secret_path": secret_path
                            },
                        },
                        "authorization": {
                            "enabled": True
                        },
                    }
                }
            },
            service_options,
        )
    sdk_install.install(
        config.PACKAGE_NAME,
        foldered_name,
        config.DEFAULT_TASK_COUNT,
        additional_options=service_options,
    )