def test_force_unlock_no_lock_id(self, mocker: MockFixture) -> None:
    """force_unlock must not shell out to terraform when no AWS lock id exists."""
    mock_layer = mocker.Mock(spec=Layer)
    # Minimal s3 backend config so force_unlock takes the AWS code path.
    mock_layer.gen_providers.return_value = {
        "terraform": {
            "backend": {
                "s3": {
                    "bucket": "opta-tf-state-test-dev1",
                    "key": "dev1",
                    "dynamodb_table": "opta-tf-state-test-dev1",
                    "region": "us-east-1",
                }
            }
        }
    }
    mocker.patch("opta.core.terraform.AWS")
    # An empty lock id simulates "no lock currently held".
    mock_get_aws_lock_id = mocker.patch(
        "opta.core.terraform.Terraform._get_aws_lock_id",
        return_value="",
    )
    mock_force_unlock_nice_run = mocker.patch("opta.core.terraform.nice_run")
    Terraform.force_unlock(mock_layer)
    mock_layer.gen_providers.assert_called_once_with(0, clean=False)
    mock_get_aws_lock_id.assert_called_once_with(mock_layer)
    # No lock id -> no `terraform force-unlock` subprocess invocation.
    mock_force_unlock_nice_run.assert_not_called()
def test_validate_version_good(self, mocker: MockFixture) -> None:
    """A supported terraform version (1.0.0) passes validation without raising."""
    mocked_ensure_installed = mocker.patch("opta.core.terraform.ensure_installed")
    mocked_get_version = mocker.patch(
        "opta.core.terraform.Terraform.get_version", return_value="1.0.0"
    )

    Terraform.validate_version()

    mocked_ensure_installed.assert_called_once_with("terraform")
    mocked_get_version.assert_called_once()
def test_validate_version_missing(self, mocker: MockFixture) -> None:
    """A missing terraform binary surfaces the UserErrors from ensure_installed
    and the version is never queried."""
    mocked_ensure = mocker.patch(
        "opta.core.terraform.ensure_installed",
        side_effect=UserErrors("foobar"),
    )
    mocked_version = mocker.patch("opta.core.terraform.Terraform.get_version")

    with pytest.raises(UserErrors) as excinfo:
        Terraform.validate_version()

    assert str(excinfo.value) == "foobar"
    mocked_ensure.assert_called_once_with("terraform")
    mocked_version.assert_not_called()
def test_init(self, mocker: MockFixture) -> None:
    """Both `apply` and `plan` must run terraform init first."""
    mocker.patch("opta.core.terraform.nice_run")
    mocked_init = mocker.patch("opta.core.terraform.Terraform.init")
    layer_stub = mocker.Mock(spec=Layer)
    layer_stub.cloud = "blah"

    # apply triggers one init ...
    Terraform.apply(layer=layer_stub)
    assert mocked_init.call_count == 1

    # ... and plan triggers another.
    Terraform.plan(layer=layer_stub)
    assert mocked_init.call_count == 2
def test_validate_version_high(self, mocker: MockFixture) -> None:
    """A terraform version at the 2.0.0 ceiling is rejected with a clear error."""
    mocker.patch("opta.core.terraform.ensure_installed")
    mocked_version = mocker.patch(
        "opta.core.terraform.Terraform.get_version", return_value="2.0.0"
    )

    with pytest.raises(UserErrors) as excinfo:
        Terraform.validate_version()

    expected_message = (
        "Invalid terraform version 2.0.0 -- must be less than 2.0.0."
        " Check https://docs.opta.dev/installation/#prerequisites"
    )
    assert str(excinfo.value) == expected_message
    mocked_version.assert_called_once()
def test_azure_verify_storage(self, mocker: MockFixture) -> None:
    """_azure_verify_storage reports success when the Azure SDK is fully stubbed."""
    # Same azurerm provider settings are used for layer.providers and the
    # generated provider block, so build the dict once.
    azure_provider = {
        "azurerm": {
            "location": "centralus",
            "tenant_id": "blahbc17-blah-blah-blah-blah291d395b",
            "subscription_id": "blah99ae-blah-blah-blah-blahd2a04788",
        }
    }
    mock_layer = mocker.Mock(spec=Layer)
    mock_layer.parent = None
    mock_layer.cloud = "azurerm"
    mock_layer.name = "blah"
    mock_layer.providers = azure_provider
    mock_layer.root.return_value = mock_layer
    mock_layer.gen_providers.return_value = {
        "terraform": {
            "backend": {
                "azurerm": {
                    "resource_group_name": "dummy_resource_group",
                    "storage_account_name": "dummy_storage_account",
                    "container_name": "dummy_container_name",
                }
            }
        },
        "provider": azure_provider,
    }
    mocker.patch("opta.core.terraform.Azure")

    assert Terraform._azure_verify_storage(mock_layer)
def test_google_download_state(self, mocker: MockFixture) -> None:
    """download_state on a gcs backend streams the blob and reads the temp file."""
    layer = mocker.Mock(spec=Layer)
    layer.gen_providers.return_value = {
        "terraform": {
            "backend": {
                "gcs": {"bucket": "opta-tf-state-test-dev1", "prefix": "dev1"}
            }
        },
        "provider": {
            "google": {"region": "us-central1", "project": "dummy-project"}
        },
    }
    layer.name = "blah"
    layer.cloud = "google"
    # Storage is reported as present, so download_state must not re-init terraform.
    mocker.patch(
        "opta.core.terraform.Terraform._gcp_verify_storage", return_value=True
    )
    patched_init = mocker.patch(
        "opta.core.terraform.Terraform.init", return_value=True
    )
    mocked_credentials = mocker.Mock()
    mocked_gcp_credentials = mocker.patch(
        "opta.core.terraform.GCP.get_credentials",
        return_value=[mocked_credentials, "dummy-project"],
    )
    mocked_storage_client = mocker.Mock()
    mocked_client_constructor = mocker.patch(
        "opta.core.terraform.storage.Client", return_value=mocked_storage_client
    )
    mocked_bucket_object = mocker.Mock()
    mocked_storage_client.get_bucket.return_value = mocked_bucket_object
    # Fake tfstate content handed back by the mocked open().
    read_data = '{"a": 1}'
    mocked_file = mocker.mock_open(read_data=read_data)
    mocker.patch("opta.core.terraform.os.remove")
    mocked_open = mocker.patch("opta.core.terraform.open", mocked_file)
    assert Terraform.download_state(layer)
    patched_init.assert_not_called()
    mocked_gcp_credentials.assert_called_once_with()
    mocked_client_constructor.assert_called_once_with(
        project="dummy-project", credentials=mocked_credentials
    )
    mocked_storage_client.get_bucket.assert_called_once_with(
        "opta-tf-state-test-dev1"
    )
    # State file is written as binary then re-read as text.
    mocked_open.assert_has_calls(
        [mocker.call("./tmp.tfstate", "wb"), mocker.call("./tmp.tfstate", "r")],
        any_order=True,
    )
    mocked_storage_client.download_blob_to_file.assert_called_once_with(
        mocker.ANY, mocker.ANY
    )
def test_force_unlock_azure(self, mocker: MockFixture) -> None:
    """force_unlock on an azurerm backend passes the Azure lock id to terraform."""
    mock_layer = mocker.Mock(spec=Layer)
    mock_layer.gen_providers.return_value = {
        "terraform": {
            "backend": {
                "azurerm": {
                    "resource_group_name": "dummy_resource_group",
                    "storage_account_name": "dummy_storage_account",
                    "container_name": "dummy_container_name",
                }
            }
        },
        "provider": {
            "azurerm": {
                "location": "centralus",
                "tenant_id": "blahbc17-blah-blah-blah-blah291d395b",
                "subscription_id": "blah99ae-blah-blah-blah-blahd2a04788",
            }
        },
    }
    # NOTE(review): this patches GCP inside an Azure test -- looks like a
    # copy-paste from the GCP variant. Confirm whether Azure should be
    # patched instead; the test only passes as-is if force_unlock never
    # touches GCP for azurerm backends.
    mocker.patch("opta.core.terraform.GCP")
    mock_get_azure_lock_id = mocker.patch(
        "opta.core.terraform.Terraform._get_azure_lock_id",
        return_value="mock_azure_lock_id",
    )
    mock_force_unlock_nice_run = mocker.patch("opta.core.terraform.nice_run")
    Terraform.force_unlock(mock_layer)
    mock_layer.gen_providers.assert_called_once_with(0, clean=False)
    mock_get_azure_lock_id.assert_called_once_with(mock_layer)
    # Exactly one `terraform force-unlock <lock-id>` subprocess call.
    mock_force_unlock_nice_run.assert_called_once_with(
        ["terraform", "force-unlock", mock_get_azure_lock_id.return_value],
        check=True,
        use_asyncio_nice_run=True,
    )
def test_get_modules(self, mocker: MockFixture) -> None:
    """get_existing_modules dedupes module names and ignores root-level resources."""
    mocker.patch(
        "opta.core.terraform.Terraform.download_state", return_value=True
    )
    # State mixing managed/data resources, duplicate module entries, and
    # two resources with no "module" key at all.
    state_resources = [
        {
            "module": "module.redis",
            "mode": "managed",
            "type": "aws_elasticache_replication_group",
            "name": "redis_cluster",
        },
        {
            "module": "module.redis",
            "mode": "data",
            "type": "aws_eks_cluster_auth",
            "name": "k8s",
        },
        {
            "module": "module.redis",
            "mode": "managed",
            "type": "aws_elasticache_replication_group",
            "name": "redis_cluster",
        },
        {
            "module": "module.doc_db",
            "mode": "data",
            "type": "aws_security_group",
            "name": "security_group",
        },
        {"mode": "data", "type": "aws_caller_identity", "name": "provider"},
        {"mode": "data", "type": "aws_eks_cluster_auth", "name": "k8s"},
    ]
    mocker.patch(
        "opta.core.terraform.Terraform.get_state",
        return_value={"resources": state_resources},
    )
    fake_layer = mocker.Mock(spec=Layer)
    fake_layer.name = "blah"
    fake_layer.cloud = "blah"

    assert Terraform.get_existing_modules(fake_layer) == {"redis", "doc_db"}
def test_force_unlock_aws(self, mocker: MockFixture) -> None:
    """force_unlock forwards extra flags plus the AWS lock id to nice_run."""
    extra_flags: List[str] = ["-force"]
    fake_layer = mocker.Mock(spec=Layer)
    fake_layer.gen_providers.return_value = {
        "terraform": {
            "backend": {
                "s3": {
                    "bucket": "opta-tf-state-test-dev1",
                    "key": "dev1",
                    "dynamodb_table": "opta-tf-state-test-dev1",
                    "region": "us-east-1",
                }
            }
        }
    }
    mocker.patch("opta.core.terraform.AWS")
    mocked_lock_id = mocker.patch(
        "opta.core.terraform.Terraform._get_aws_lock_id",
        return_value="mock_aws_lock_id",
    )
    mocked_nice_run = mocker.patch("opta.core.terraform.nice_run")

    Terraform.force_unlock(fake_layer, *extra_flags)

    fake_layer.gen_providers.assert_called_once_with(0, clean=False)
    mocked_lock_id.assert_called_once_with(fake_layer)
    # Flags come between the subcommand and the lock id.
    mocked_nice_run.assert_called_once_with(
        ["terraform", "force-unlock"] + extra_flags + ["mock_aws_lock_id"],
        check=True,
        use_asyncio_nice_run=True,
    )
def test_force_unlock_gcp(self, mocker: MockFixture) -> None:
    """force_unlock on a gcs backend runs terraform with the GCP lock id."""
    fake_layer = mocker.Mock(spec=Layer)
    fake_layer.gen_providers.return_value = {
        "terraform": {
            "backend": {
                "gcs": {"bucket": "opta-tf-state-test-dev1", "prefix": "dev1"}
            }
        },
        "provider": {
            "google": {"region": "us-central1", "project": "dummy-project"}
        },
    }
    mocker.patch("opta.core.terraform.GCP")
    mocked_lock_id = mocker.patch(
        "opta.core.terraform.Terraform._get_gcp_lock_id",
        return_value="mock_gcp_lock_id",
    )
    mocked_nice_run = mocker.patch("opta.core.terraform.nice_run")

    Terraform.force_unlock(fake_layer)

    fake_layer.gen_providers.assert_called_once_with(0, clean=False)
    mocked_lock_id.assert_called_once_with(fake_layer)
    mocked_nice_run.assert_called_once_with(
        ["terraform", "force-unlock", "mock_gcp_lock_id"],
        check=True,
        use_asyncio_nice_run=True,
    )
def test_aws_download_state(self, mocker: MockFixture) -> None:
    """download_state on an s3 backend fetches the object via boto3 and reads it."""
    layer = mocker.Mock(spec=Layer)
    layer.gen_providers.return_value = {
        "terraform": {
            "backend": {
                "s3": {
                    "bucket": "opta-tf-state-test-dev1",
                    "key": "dev1",
                    "dynamodb_table": "opta-tf-state-test-dev1",
                    "region": "us-east-1",
                }
            }
        }
    }
    layer.name = "blah"
    layer.cloud = "aws"
    # Storage reported present -> no terraform init expected.
    mocker.patch(
        "opta.core.terraform.Terraform._aws_verify_storage", return_value=True
    )
    patched_init = mocker.patch(
        "opta.core.terraform.Terraform.init", return_value=True
    )
    mocked_s3_client = mocker.Mock()
    mocked_boto_client = mocker.patch(
        "opta.core.terraform.boto3.client", return_value=mocked_s3_client
    )
    # Fake tfstate content for the mocked open().
    read_data = '{"a": 1}'
    mocked_file = mocker.mock_open(read_data=read_data)
    mocker.patch("opta.core.terraform.os.remove")
    mocked_open = mocker.patch("opta.core.terraform.open", mocked_file)
    assert Terraform.download_state(layer)
    layer.gen_providers.assert_called_once_with(0)
    mocked_s3_client.download_file.assert_called_once_with(
        Bucket="opta-tf-state-test-dev1", Key="dev1", Filename="./tmp.tfstate"
    )
    mocked_open.assert_called_once_with("./tmp.tfstate", "r")
    patched_init.assert_not_called()
    mocked_boto_client.assert_called_once_with("s3", config=mocker.ANY)
def test_create_azure_state_storage(self, mocker: MockFixture) -> None:
    """create_state_storage on Azure: creates the resource group, grants the
    storage and key-vault roles to the Owner principal, and checks the
    storage account/container exist."""
    layer = mocker.Mock(spec=Layer)
    layer.parent = None
    layer.cloud = "azurerm"
    layer.name = "blah"
    layer.providers = {
        "azurerm": {
            "location": "centralus",
            "tenant_id": "blahbc17-blah-blah-blah-blah291d395b",
            "subscription_id": "blah99ae-blah-blah-blah-blahd2a04788",
        }
    }
    layer.root.return_value = layer
    layer.gen_providers.return_value = {
        "terraform": {
            "backend": {
                "azurerm": {
                    "resource_group_name": "dummy_resource_group",
                    "storage_account_name": "dummy_storage_account",
                    "container_name": "dummy_container_name",
                }
            }
        },
        "provider": {
            "azurerm": {
                "location": "centralus",
                "tenant_id": "blahbc17-blah-blah-blah-blah291d395b",
                "subscription_id": "blah99ae-blah-blah-blah-blahd2a04788",
            }
        },
    }
    mocked_azure = mocker.patch("opta.core.terraform.Azure")
    mocked_credentials = mocker.Mock()
    mocked_azure.get_credentials.return_value = mocked_credentials
    mocked_resource_client_instance = mocker.Mock()
    mocked_resource_client = mocker.patch(
        "opta.core.terraform.ResourceManagementClient",
        return_value=mocked_resource_client_instance,
    )
    mocked_rg_result = mocker.Mock()
    mocked_rg_result.name = "dummy_resource_group"
    mocked_rg_result.id = "resource_group_id"
    mocked_resource_client_instance.resource_groups.create_or_update.return_value = (
        mocked_rg_result
    )
    mocked_authorization_management_client_instance = mocker.Mock()
    mocked_authorization_management_client = mocker.patch(
        "opta.core.terraform.AuthorizationManagementClient",
        return_value=mocked_authorization_management_client_instance,
    )
    # Three role-definition lookups are expected, in the order:
    # Owner, Storage Blob Data Owner, Key Vault Administrator.
    mocked_owner_role = mocker.Mock()
    mocked_owner_role.id = "owner_role_id"
    storage_role = mocker.Mock()
    storage_role.id = "storage_role_id"
    key_vault_role = mocker.Mock()
    key_vault_role.id = "key_vault_role_id"
    mocked_authorization_management_client_instance.role_definitions.list.side_effect = [
        [mocked_owner_role],
        [storage_role],
        [key_vault_role],
    ]
    # One existing Owner assignment on the resource group; its principal is
    # the one the new role assignments should target.
    role_assignment = mocker.Mock()
    role_assignment.role_definition_id = "owner_role_id"
    mocked_authorization_management_client_instance.role_assignments.list_for_resource_group.return_value = [
        role_assignment
    ]
    mocked_storage_client_instance = mocker.Mock()
    mocked_storage_client = mocker.patch(
        "opta.core.terraform.StorageManagementClient",
        return_value=mocked_storage_client_instance,
    )
    Terraform.create_state_storage(layer)
    mocked_azure.get_credentials.assert_called_once_with()
    mocked_resource_client.assert_called_once_with(
        mocked_credentials, "blah99ae-blah-blah-blah-blahd2a04788"
    )
    mocked_authorization_management_client.assert_called_once_with(
        mocked_credentials,
        "blah99ae-blah-blah-blah-blahd2a04788",
        api_version="2018-01-01-preview",
    )
    mocked_resource_client_instance.resource_groups.create_or_update.assert_called_once_with(
        "dummy_resource_group", {"location": "centralus"}
    )
    mocked_authorization_management_client_instance.role_definitions.list.assert_has_calls(
        [
            mocker.call("resource_group_id", filter="roleName eq 'Owner'"),
            mocker.call(
                "resource_group_id", filter="roleName eq 'Storage Blob Data Owner'"
            ),
            mocker.call(
                "resource_group_id", filter="roleName eq 'Key Vault Administrator'"
            ),
        ],
        any_order=True,
    )
    mocked_authorization_management_client_instance.role_assignments.list_for_resource_group.assert_called_once_with(
        "dummy_resource_group"
    )
    mocked_authorization_management_client_instance.role_assignments.create.assert_has_calls(
        [
            mocker.call(
                scope="/subscriptions/blah99ae-blah-blah-blah-blahd2a04788/resourceGroups/dummy_resource_group",
                role_assignment_name=mocker.ANY,
                parameters={
                    "role_definition_id": storage_role.id,
                    "principal_id": role_assignment.principal_id,
                },
            ),
            mocker.call(
                scope="/subscriptions/blah99ae-blah-blah-blah-blahd2a04788/resourceGroups/dummy_resource_group",
                role_assignment_name=mocker.ANY,
                parameters={
                    "role_definition_id": key_vault_role.id,
                    "principal_id": role_assignment.principal_id,
                },
            ),
        ],
        any_order=True,
    )
    mocked_storage_client.assert_called_once_with(
        mocked_credentials, "blah99ae-blah-blah-blah-blahd2a04788"
    )
    mocked_storage_client_instance.storage_accounts.get_properties.assert_called_once_with(
        "dummy_resource_group", "dummy_storage_account"
    )
    mocked_storage_client_instance.blob_containers.get.assert_called_once_with(
        "dummy_resource_group", "dummy_storage_account", "dummy_container_name"
    )
def dependency_check() -> None:
    """Check dependencies that are required globally"""
    # Delegates to Terraform.validate_version, which verifies a terraform
    # binary is installed and its version is in the supported range.
    Terraform.validate_version()
def force_unlock(
    config: str,
    env: Optional[str],
    local: Optional[bool],
    var: Dict[str, str],
) -> None:
    """Release a stuck lock on the current workspace

    Manually unlock the state for the defined configuration.

    This will not modify your infrastructure.

    This command removes the lock on the state for the current workspace.

    Examples:

    opta force-unlock -c my-config.yaml -e prod
    """
    # NOTE: the docstring above is the click help text shown to users --
    # do not edit it casually.
    try:
        # Guard against concurrent opta invocations on this machine.
        opta_acquire_lock()
        tf_flags: List[str] = []
        config = check_opta_file_exists(config)
        if local:
            config = local_setup(config, input_variables=var)
        amplitude_client.send_event(amplitude_client.FORCE_UNLOCK_EVENT)
        layer = Layer.load_from_yaml(
            config, env, input_variables=var, strict_input_variables=False
        )
        layer.verify_cloud_credentials()
        # Only keep modules that actually exist in the remote state, so the
        # generated terraform matches what is locked.
        modules = Terraform.get_existing_modules(layer)
        layer.modules = [x for x in layer.modules if x.name in modules]
        gen_all(layer)
        tf_lock_exists, _ = Terraform.tf_lock_details(layer)
        if tf_lock_exists:
            Terraform.init(layer=layer)
            # Destructive-ish action: require explicit confirmation.
            click.confirm(
                "This will remove the lock on the remote state."
                "\nPlease make sure that no other instance of opta command is running on this file."
                "\nDo you still want to proceed?",
                abort=True,
            )
            tf_flags.append("-force")
            Terraform.force_unlock(layer, *tf_flags)

        if Terraform.download_state(layer):
            # Offer to roll back any helm releases stuck in pending-upgrade,
            # but only when a k8s cluster is in play (service layer or an
            # environment that contains a k8scluster module).
            if layer.parent is not None or "k8scluster" in modules:
                set_kube_config(layer)
                kube_context = layer.get_cloud_client().get_kube_context_name()
                pending_upgrade_release_list = Helm.get_helm_list(
                    kube_context=kube_context, status="pending-upgrade"
                )
                click.confirm(
                    "Do you also wish to Rollback the Helm releases in Pending-Upgrade State?"
                    "\nPlease make sure that no other instance of opta command is running on this file."
                    "\nDo you still want to proceed?",
                    abort=True,
                )
                for release in pending_upgrade_release_list:
                    Helm.rollback_helm(
                        kube_context,
                        release["name"],
                        namespace=release["namespace"],
                        revision=release["revision"],
                    )
    finally:
        # Always release the local opta lock, even on abort/error.
        opta_release_lock()
def test_create_aws_state_storage(self, mocker: MockFixture) -> None:
    """create_state_storage on AWS creates the missing S3 bucket (encrypted)
    and the missing DynamoDB lock table."""
    layer = mocker.Mock(spec=Layer)
    layer.gen_providers.return_value = {
        "terraform": {
            "backend": {
                "s3": {
                    "bucket": "opta-tf-state-test-dev1",
                    "key": "dev1",
                    "dynamodb_table": "opta-tf-state-test-dev1",
                    "region": "us-east-1",
                }
            }
        }
    }
    mocked_s3_client = mocker.Mock()
    mocked_dynamodb_client = mocker.Mock()
    mocked_iam_client = mocker.Mock()
    mocked_boto3 = mocker.patch("opta.core.terraform.boto3")
    # boto3.client is expected to be called for s3, dynamodb, iam in order.
    mocked_boto3.client.side_effect = [
        mocked_s3_client,
        mocked_dynamodb_client,
        mocked_iam_client,
    ]
    # NoSuchBucket / ResourceNotFoundException simulate "storage does not
    # exist yet", which is what triggers the create calls below.
    mocked_s3_client.get_bucket_encryption.side_effect = ClientError(
        error_response={"Error": {"Code": "NoSuchBucket", "Message": "Blah"}},
        operation_name="Blah",
    )
    mocked_dynamodb_client.describe_table.side_effect = ClientError(
        error_response={
            "Error": {"Code": "ResourceNotFoundException", "Message": "Blah"}
        },
        operation_name="Blah",
    )
    Terraform.create_state_storage(layer)
    layer.gen_providers.assert_called_once_with(0, clean=False)
    mocked_dynamodb_client.create_table.assert_called_once_with(
        TableName="opta-tf-state-test-dev1",
        KeySchema=[{"AttributeName": "LockID", "KeyType": "HASH"}],
        AttributeDefinitions=[{"AttributeName": "LockID", "AttributeType": "S"}],
        BillingMode="PROVISIONED",
        ProvisionedThroughput={"ReadCapacityUnits": 20, "WriteCapacityUnits": 20},
    )
    mocked_s3_client.create_bucket.assert_called_once_with(
        Bucket="opta-tf-state-test-dev1"
    )
    mocked_s3_client.put_bucket_encryption.assert_called_once_with(
        Bucket="opta-tf-state-test-dev1",
        ServerSideEncryptionConfiguration={
            "Rules": [
                {"ApplyServerSideEncryptionByDefault": {"SSEAlgorithm": "AES256"}},
            ]
        },
    )
    # Visit (https://run-x.atlassian.net/browse/RUNX-1125) for further reference
    mocked_boto3.client.assert_has_calls([
        mocker.call("s3", config=mocker.ANY),
        mocker.call("dynamodb", config=mocker.ANY),
        mocker.call("iam", config=mocker.ANY),
    ])
def destroy(
    config: str,
    env: Optional[str],
    auto_approve: bool,
    detailed_plan: bool,
    local: Optional[bool],
    var: Dict[str, str],
) -> None:
    """Destroy all opta resources from the current config

    To destroy an environment, you have to first destroy all the services first.

    Examples:

    opta destroy -c my-service.yaml --auto-approve

    opta destroy -c my-env.yaml --auto-approve
    """
    # NOTE: docstring above is the click help text -- do not edit casually.
    try:
        opta_acquire_lock()
        pre_check()
        logger.warning(
            "You are destroying your cloud infra state. DO NOT, I REPEAT, DO NOT do this as "
            "an attempt to debug a weird/errored apply. What you have created is not some ephemeral object that can be "
            "tossed arbitrarily (perhaps some day) and destroying unnecessarily just to reapply typically makes it "
            "worse. If you're doing this cause you are really trying to destroy the environment entirely, then that's"
            "perfectly fine-- if not then please reach out to the opta team in the slack workspace "
            "(https://slack.opta.dev) and I promise that they'll be happy to help debug."
        )
        config = check_opta_file_exists(config)
        if local:
            config, _ = _handle_local_flag(config, False)
            _clean_tf_folder()
        layer = Layer.load_from_yaml(config, env, input_variables=var)
        event_properties: Dict = layer.get_event_properties()
        amplitude_client.send_event(
            amplitude_client.DESTROY_EVENT,
            event_properties=event_properties,
        )
        layer.verify_cloud_credentials()
        layer.validate_required_path_dependencies()
        # Missing state means there is nothing to destroy (possibly destroyed
        # already) -- bail out gracefully.
        if not Terraform.download_state(layer):
            logger.info(
                "The opta state could not be found. This may happen if destroy ran successfully before."
            )
            return
        tf_lock_exists, _ = Terraform.tf_lock_details(layer)
        if tf_lock_exists:
            raise UserErrors(USER_ERROR_TF_LOCK)

        # Any child layers should be destroyed first before the current layer.
        children_layers = _fetch_children_layers(layer)
        if children_layers:
            # TODO: ideally we can just automatically destroy them but it's
            # complicated...
            logger.error(
                "Found the following services that depend on this environment. Please run `opta destroy` on them first!\n"
                + "\n".join(children_layers)
            )
            raise UserErrors("Dependant services found!")

        tf_flags: List[str] = []
        if auto_approve:
            # Give the operator a short window to abort before the
            # non-interactive destroy begins.
            sleep_time = 5
            logger.info(
                f"{attr('bold')}Opta will now destroy the {attr('underlined')}{layer.name}{attr(0)}"
                f"{attr('bold')} layer.{attr(0)}\n"
                f"{attr('bold')}Sleeping for {attr('underlined')}{sleep_time} secs{attr(0)}"
                f"{attr('bold')}, press Ctrl+C to Abort.{attr(0)}"
            )
            time.sleep(sleep_time)
            tf_flags.append("-auto-approve")

        # Only generate modules that really exist in the remote state.
        modules = Terraform.get_existing_modules(layer)
        layer.modules = [x for x in layer.modules if x.name in modules]
        gen_all(layer)
        Terraform.init(False, "-reconfigure", layer=layer)
        Terraform.refresh(layer)

        # Destroy modules one at a time, in reverse declaration order.
        idx = len(layer.modules) - 1
        for module in reversed(layer.modules):
            try:
                module_address_prefix = f"-target=module.{module.name}"
                logger.info("Planning your changes (might take a minute)")
                Terraform.plan(
                    "-lock=false",
                    "-input=false",
                    "-destroy",
                    f"-out={TF_PLAN_PATH}",
                    layer=layer,
                    *list([module_address_prefix]),
                )
                PlanDisplayer.display(detailed_plan=detailed_plan)
                # NOTE(review): this reset discards the "-auto-approve"
                # appended above and rebuilds the flag list per module --
                # confirm this duplication is intentional.
                tf_flags = []
                if not auto_approve:
                    click.confirm(
                        "The above are the planned changes for your opta run. Do you approve?",
                        abort=True,
                    )
                else:
                    tf_flags.append("-auto-approve")
                Terraform.apply(
                    layer, *tf_flags, TF_PLAN_PATH, no_init=True, quiet=False
                )
                layer.post_delete(idx)
                idx -= 1
            except Exception as e:
                # NOTE(review): bare re-raise -- this try/except is a no-op
                # and could be removed.
                raise e
        Terraform.delete_state_storage(layer)
    finally:
        opta_release_lock()
def test_azure_download_state(self, mocker: MockFixture) -> None:
    """download_state on an azurerm backend fetches the blob named by the
    backend `key` and reads the resulting temp file."""
    layer = mocker.Mock(spec=Layer)
    layer.parent = None
    layer.cloud = "azurerm"
    layer.name = "blah"
    layer.providers = {
        "azurerm": {
            "location": "centralus",
            "tenant_id": "blahbc17-blah-blah-blah-blah291d395b",
            "subscription_id": "blah99ae-blah-blah-blah-blahd2a04788",
        }
    }
    layer.root.return_value = layer
    layer.gen_providers.return_value = {
        "terraform": {
            "backend": {
                "azurerm": {
                    "resource_group_name": "dummy_resource_group",
                    "storage_account_name": "dummy_storage_account",
                    "container_name": "dummy_container_name",
                    "key": "dummy_key",
                }
            }
        },
        "provider": {
            "azurerm": {
                "location": "centralus",
                "tenant_id": "blahbc17-blah-blah-blah-blah291d395b",
                "subscription_id": "blah99ae-blah-blah-blah-blahd2a04788",
            }
        },
    }
    mocked_azure = mocker.patch("opta.core.terraform.Azure")
    mocked_credentials = mocker.Mock()
    mocked_azure.get_credentials.return_value = mocked_credentials
    # Storage reported present, so no init/bootstrap path is exercised.
    mocker.patch(
        "opta.core.terraform.Terraform._azure_verify_storage", return_value=True
    )
    mocked_blob_service_client_instance = mocker.Mock()
    mocked_blob_service_client = mocker.patch(
        "opta.core.terraform.BlobServiceClient",
        return_value=mocked_blob_service_client_instance,
    )
    mocked_container_client = mocker.Mock()
    mocked_blob_service_client_instance.get_container_client.return_value = (
        mocked_container_client
    )
    mocked_blob_client = mocker.Mock()
    mocked_container_client.get_blob_client.return_value = mocked_blob_client
    # Fake tfstate content for the mocked open().
    read_data = '{"a": 1}'
    mocked_file = mocker.mock_open(read_data=read_data)
    mocker.patch("opta.core.terraform.os.remove")
    mocked_open = mocker.patch("opta.core.terraform.open", mocked_file)
    assert Terraform.download_state(layer)
    # Client URL is derived from the storage account name.
    mocked_blob_service_client.assert_called_once_with(
        "https://dummy_storage_account.blob.core.windows.net/",
        credential=mocked_credentials,
    )
    mocked_blob_service_client_instance.get_container_client.assert_called_once_with(
        "dummy_container_name"
    )
    mocked_container_client.get_blob_client.assert_called_once_with("dummy_key")
    # State is written as binary, then re-read as text.
    mocked_open.assert_has_calls(
        [mocker.call("./tmp.tfstate", "wb"), mocker.call("./tmp.tfstate", "r")],
        any_order=True,
    )
def _apply(
    config: str,
    env: Optional[str],
    refresh: bool,
    local: bool,
    image_tag: Optional[str],
    test: bool,
    auto_approve: bool,
    input_variables: Dict[str, str],
    image_digest: Optional[str] = None,
    stdout_logs: bool = True,
    detailed_plan: bool = False,
) -> None:
    """Core apply flow: validate the layer, bootstrap state storage, then
    plan/apply each generated module block in order, uploading the opta
    config and emitting analytics events along the way."""
    pre_check()
    _clean_tf_folder()
    if local and not test:
        config = local_setup(
            config, input_variables, image_tag, refresh_local_env=True
        )
    layer = Layer.load_from_yaml(config, env, input_variables=input_variables)
    layer.verify_cloud_credentials()
    layer.validate_required_path_dependencies()
    # Refuse to run while another opta/terraform process holds the state lock.
    if Terraform.download_state(layer):
        tf_lock_exists, _ = Terraform.tf_lock_details(layer)
        if tf_lock_exists:
            raise UserErrors(USER_ERROR_TF_LOCK)
    _verify_parent_layer(layer, auto_approve)

    event_properties: Dict = layer.get_event_properties()
    amplitude_client.send_event(
        amplitude_client.START_GEN_EVENT,
        event_properties=event_properties,
    )

    # We need a region with at least 3 AZs for leader election during failover.
    # Also EKS historically had problems with regions that have fewer than 3 AZs.
    if layer.cloud == "aws":
        providers = layer.gen_providers(0)["provider"]
        aws_region = providers["aws"]["region"]
        azs = _fetch_availability_zones(aws_region)
        if len(azs) < 3:
            raise UserErrors(
                fmt_msg(
                    f"""
                Opta requires a region with at least *3* availability zones like us-east-1 or us-west-2.
                ~You configured {aws_region}, which only has the availability zones: {azs}.
                ~Please choose a different region.
                """
                )
            )

    Terraform.create_state_storage(layer)
    gen_opta_resource_tags(layer)
    # Pick the cloud client matching the layer's provider.
    cloud_client: CloudClient
    if layer.cloud == "aws":
        cloud_client = AWS(layer)
    elif layer.cloud == "google":
        cloud_client = GCP(layer)
    elif layer.cloud == "azurerm":
        cloud_client = Azure(layer)
    elif layer.cloud == "local":
        if local:  # boolean passed via cli
            pass
        cloud_client = Local(layer)
    elif layer.cloud == "helm":
        cloud_client = HelmCloudClient(layer)
    else:
        raise Exception(f"Cannot handle upload config for cloud {layer.cloud}")

    existing_config: Optional[StructuredConfig] = cloud_client.get_remote_config()
    old_semver_string = (
        ""
        if existing_config is None
        else existing_config.get("opta_version", "").strip("v")
    )
    current_semver_string = VERSION.strip("v")
    _verify_semver(old_semver_string, current_semver_string, layer, auto_approve)

    try:
        existing_modules: Set[str] = set()
        first_loop = True
        for module_idx, current_modules, total_block_count in gen(
            layer, existing_config, image_tag, image_digest, test, True, auto_approve
        ):
            if first_loop:
                # This is set during the first iteration, since the tf file must exist.
                existing_modules = Terraform.get_existing_modules(layer)
                first_loop = False
            configured_modules = set([x.name for x in current_modules])
            is_last_module = module_idx == total_block_count - 1
            has_new_modules = not configured_modules.issubset(existing_modules)
            # Skip intermediate blocks with nothing new unless a refresh was asked for.
            if not is_last_module and not has_new_modules and not refresh:
                continue
            if is_last_module:
                # The final block also re-applies modules removed from config.
                untouched_modules = existing_modules - configured_modules
                configured_modules = configured_modules.union(untouched_modules)

            layer.pre_hook(module_idx)
            # NOTE(review): reconstructed nesting -- for "local" cloud no
            # targets are built except an empty list on the last block
            # (local presumably generates a single block); confirm against
            # the original formatting.
            if layer.cloud == "local":
                if is_last_module:
                    targets = []
            else:
                targets = list(
                    map(lambda x: f"-target=module.{x}", sorted(configured_modules))
                )
            if test:
                Terraform.plan("-lock=false", *targets, layer=layer)
                print("Plan ran successfully, not applying since this is a test.")
            else:
                current_properties = event_properties.copy()
                current_properties["module_idx"] = module_idx
                amplitude_client.send_event(
                    amplitude_client.APPLY_EVENT,
                    event_properties=current_properties,
                )
                logger.info("Planning your changes (might take a minute)")
                try:
                    Terraform.plan(
                        "-lock=false",
                        "-input=false",
                        f"-out={TF_PLAN_PATH}",
                        layer=layer,
                        *targets,
                        quiet=True,
                    )
                except CalledProcessError as e:
                    logger.error(e.stderr or "")
                    raise e
                PlanDisplayer.display(detailed_plan=detailed_plan)

                if not auto_approve:
                    click.confirm(
                        "The above are the planned changes for your opta run. Do you approve?",
                        abort=True,
                    )

                logger.info("Applying your changes (might take a minute)")
                service_modules = (
                    layer.get_module_by_type("k8s-service", module_idx)
                    if layer.cloud == "aws"
                    else layer.get_module_by_type("gcp-k8s-service", module_idx)
                )
                # When deploying a k8s service onto an existing cluster,
                # tail pod logs and namespace events in background threads.
                if (
                    len(service_modules) != 0
                    and cluster_exist(layer.root())
                    and stdout_logs
                ):
                    service_module = service_modules[0]
                    # Tailing logs
                    logger.info(
                        f"Identified deployment for kubernetes service module {service_module.name}, tailing logs now."
                    )
                    new_thread = Thread(
                        target=tail_module_log,
                        args=(
                            layer,
                            service_module.name,
                            10,
                            datetime.datetime.utcnow().replace(tzinfo=pytz.UTC),
                            2,
                        ),
                        daemon=True,
                    )
                    # Tailing events
                    new_thread.start()
                    new_thread = Thread(
                        target=tail_namespace_events,
                        args=(
                            layer,
                            datetime.datetime.utcnow().replace(tzinfo=pytz.UTC),
                            3,
                        ),
                        daemon=True,
                    )
                    new_thread.start()

                tf_flags: List[str] = []
                if auto_approve:
                    tf_flags.append("-auto-approve")
                try:
                    Terraform.apply(
                        layer, *tf_flags, TF_PLAN_PATH, no_init=True, quiet=False
                    )
                except Exception as e:
                    layer.post_hook(module_idx, e)
                    raise e
                else:
                    layer.post_hook(module_idx, None)
                cloud_client.upload_opta_config()
        logger.info("Opta updates complete!")
    except Exception as e:
        event_properties["success"] = False
        event_properties["error_name"] = e.__class__.__name__
        raise e
    else:
        event_properties["success"] = True
    finally:
        amplitude_client.send_event(
            amplitude_client.FINISH_GEN_EVENT,
            event_properties=event_properties,
        )
def test_create_google_state_storage(self, mocker: MockFixture) -> None:
    """create_state_storage on GCP creates the missing GCS bucket, enables the
    required APIs (waiting 120s), and cross-checks the project number."""
    layer = mocker.Mock(spec=Layer)
    layer.gen_providers.return_value = {
        "terraform": {
            "backend": {
                "gcs": {"bucket": "opta-tf-state-test-dev1", "prefix": "dev1"}
            }
        },
        "provider": {
            "google": {"region": "us-central1", "project": "dummy-project"}
        },
    }
    mocked_gcp = mocker.patch("opta.core.terraform.GCP")
    mocked_credentials = mocker.Mock()
    mocked_gcp.get_credentials.return_value = tuple(
        [mocked_credentials, "dummy-project"]
    )
    mocked_storage = mocker.patch("opta.core.terraform.storage")
    mocked_storage_client = mocker.Mock()
    mocked_storage.Client.return_value = mocked_storage_client
    # A 404 from get_bucket means "bucket missing" and triggers creation.
    get_bucket_error = GoogleClientError(message="blah")
    get_bucket_error.code = 404
    mocked_storage_client.get_bucket.side_effect = get_bucket_error
    mocked_bucket = mocker.Mock()
    mocked_bucket.project_number = "123"
    mocked_storage_client.create_bucket.return_value = mocked_bucket
    mocked_google_credentials = mocker.patch(
        "opta.core.terraform.GoogleCredentials"
    )
    mocked_api_credentials = mocker.Mock()
    mocked_google_credentials.get_application_default.return_value = (
        mocked_api_credentials
    )
    # discovery.build is called twice: serviceusage, then cloudresourcemanager.
    mocked_discovery = mocker.patch("opta.core.terraform.discovery")
    mocked_service = mocker.Mock()
    mocked_cloudresourcemanager = mocker.Mock()
    mocked_discovery.build.side_effect = [
        mocked_service,
        mocked_cloudresourcemanager,
    ]
    mocked_service_services = mocker.Mock()
    mocked_service.services.return_value = mocked_service_services
    mocked_request = mocker.Mock()
    mocked_service_services.enable.return_value = mocked_request
    mocked_response: dict = {}
    mocked_request.execute.return_value = mocked_response
    # Patch sleep so the post-enable wait is instant in the test.
    mocked_sleep = mocker.patch("opta.core.terraform.time.sleep")
    mocked_cloudresourcemanager_projects = mocker.Mock()
    mocked_cloudresourcemanager.projects.return_value = (
        mocked_cloudresourcemanager_projects
    )
    mocked_cloudresourcemanager_request = mocker.Mock()
    mocked_cloudresourcemanager_projects.get.return_value = (
        mocked_cloudresourcemanager_request
    )
    # Project number matches the created bucket's project_number above.
    mocked_cloudresourcemanager_response: dict = {"projectNumber": "123"}
    mocked_cloudresourcemanager_request.execute.return_value = (
        mocked_cloudresourcemanager_response
    )
    Terraform.create_state_storage(layer)
    mocked_gcp.get_credentials.assert_called_once_with()
    mocked_storage.Client.assert_called_once_with(
        project="dummy-project", credentials=mocked_credentials
    )
    mocked_storage_client.get_bucket.assert_called_once_with(
        "opta-tf-state-test-dev1"
    )
    mocked_storage_client.create_bucket.assert_called_once_with(
        "opta-tf-state-test-dev1", location="us-central1"
    )
    mocked_google_credentials.get_application_default.assert_called_once_with()
    mocked_discovery.build.assert_has_calls([
        mocker.call(
            "serviceusage",
            "v1",
            credentials=mocked_api_credentials,
            static_discovery=False,
        ),
        mocker.call(
            "cloudresourcemanager",
            "v1",
            credentials=mocked_api_credentials,
            static_discovery=False,
        ),
    ])
    mocked_sleep.assert_called_once_with(120)
def display(detailed_plan: bool = False) -> None:
    """Pretty-print the pending terraform plan stored at TF_PLAN_PATH.

    When ``detailed_plan`` is True the raw ``terraform show`` output is printed
    verbatim. Otherwise the JSON form of the plan is summarized per module and
    resource with a risk rating derived from the planned action, and rendered
    as a table.

    Side effects: stores the plan text on CURRENT_CRASH_REPORTER.tf_plan_text
    and writes the summary via ``logger``/``print``.
    """
    if detailed_plan:
        regular_plan = Terraform.show(TF_PLAN_PATH, capture_output=True)
        # Keep an ANSI-scrubbed copy for crash reporting before echoing the plan.
        CURRENT_CRASH_REPORTER.tf_plan_text = ansi_scrub(regular_plan or "")
        print(regular_plan)
        return
    plan_dict = json.loads(
        Terraform.show(*["-no-color", "-json", TF_PLAN_PATH], capture_output=True)  # type: ignore
    )
    CURRENT_CRASH_REPORTER.tf_plan_text = (
        CURRENT_CRASH_REPORTER.tf_plan_text or json.dumps(plan_dict)
    )
    plan_risk = LOW_RISK
    module_changes: dict = {}
    resource_change: dict
    for resource_change in plan_dict.get("resource_changes", []):
        # "no-op" changes carry no risk; skip them entirely.
        if resource_change.get("change", {}).get("actions", ["no-op"]) == ["no-op"]:
            continue
        address: str = resource_change["address"]
        if not address.startswith("module."):
            logger.warning(
                f"Unable to determine risk of changes to resource {address}. "
                "Please run in detailed plan mode for more info"
            )
            # BUGFIX: skip addresses that are not module-scoped instead of
            # falling through -- address.split(".")[1] below would mis-group
            # them (e.g. data sources) or raise IndexError for addresses
            # without a second dot-separated component.
            continue
        module_name = address.split(".")[1]
        module_changes[module_name] = module_changes.get(
            module_name, {"risk": LOW_RISK, "resources": {}}
        )
        resource_name = ".".join(address.split(".")[2:])
        actions = resource_change.get("change", {}).get("actions", [])
        # Terraform reports a replacement as a create+delete pair (either order).
        if "create" in actions and "delete" in actions:
            actions = ["replace"]
        action = actions[0]
        if action in ["read", "create"]:
            current_risk = LOW_RISK
            action_reason = "data_refresh" if action == "read" else "creation"
        elif action in ["replace", "delete"]:
            current_risk = HIGH_RISK
            action_reason = resource_change.get("action_reason", "N/A")
        elif action in ["update"]:
            current_risk, action_reason = PlanDisplayer.handle_update(resource_change)
        else:
            raise Exception(f"Do not know how to handle planned action: {action}")
        module_changes[module_name]["resources"][resource_name] = {
            "action": action,
            "reason": action_reason,
            "risk": current_risk,
        }
        # A module's risk (and the whole plan's) is the max of its resources' risks.
        module_changes[module_name]["risk"] = _max_risk(
            module_changes[module_name]["risk"], current_risk
        )
        plan_risk = _max_risk(plan_risk, current_risk)

    logger.info(
        f"Identified total risk of {RISK_COLORS[plan_risk]}{plan_risk}{attr(0)}.\n"
        f"{RISK_EXPLANATIONS[plan_risk]}\n"
        "For additional help, please reach out to the RunX team at https://slack.opta.dev/"
    )

    # Render highest-risk modules first, and within each module the
    # highest-risk resources first.
    module_changes_list = sorted(
        [(k, v) for k, v in module_changes.items()],
        key=lambda x: x[1]["risk"],
        reverse=True,
    )
    table = []
    for module_name, module_change in module_changes_list:
        resource_changes_list = sorted(
            [(k, v) for k, v in module_change["resources"].items()],
            key=lambda x: x[1]["risk"],
            reverse=True,
        )
        for resource_name, resource_change in resource_changes_list:
            current_risk = resource_change["risk"]
            table.append(
                [
                    f"{fg('blue')}{module_name}{attr(0)}",
                    resource_name,
                    resource_change["action"],
                    f"{RISK_COLORS[current_risk]}{current_risk}{attr(0)}",
                    resource_change["reason"].replace("_", " "),
                ]
            )
    if len(module_changes) == 0:
        logger.info("No changes found.")
    else:
        print(
            tabulate(
                table,
                ["module", "resource", "action", "risk", "reason"],
                tablefmt="fancy_grid",
            )
        )
    logger.info(
        "For more details, please rerun the command with the --detailed-plan flag."
    )
def deploy(
    image: str,
    config: str,
    env: Optional[str],
    tag: Optional[str],
    auto_approve: bool,
    detailed_plan: bool,
    local: Optional[bool],
    var: Dict[str, str],
) -> None:
    """Deploys an image to Kubernetes

    - Pushes the local image to private container registry (ECR, GCR, ACR), if
    configuration contains `image: AUTO`, else uses the image provided from a Repo.

    - Update the kubernetes deployment to use the new image.

    - Create new pods to use the new image - automatically done by kubernetes.

    Examples:

    opta deploy -c image-auto-configuration.yaml -i image:local --auto-approve

    opta deploy -c repo-provided-configuration.yaml -e prod

    opta deploy -c my-service.yaml -i my-image:latest --local

    Documentation: https://docs.opta.dev/features/custom_image/
    """
    try:
        # Serialize concurrent opta invocations; the lock is always released
        # in the `finally` below, even if any step raises.
        opta_acquire_lock()
        pre_check()
        config = check_opta_file_exists(config)
        if local:
            # Rewrites the config to target the local development environment.
            config = local_setup(
                config, image_tag=tag, refresh_local_env=True, input_variables=var
            )
        # deploy only makes sense for service yamls (they declare "environments").
        if not is_service_config(config):
            raise UserErrors(
                fmt_msg(
                    """
                Opta deploy can only run on service yaml files. This is an environment yaml file.
                ~See https://docs.opta.dev/getting-started/ for more details.
                ~
                ~(We think that this is an environment yaml file, because service yaml must
                ~specify the "environments" field).
                """
                )
            )
        layer = Layer.load_from_yaml(config, env, input_variables=var)
        # Analytics event for the deploy action.
        amplitude_client.send_event(
            amplitude_client.DEPLOY_EVENT,
            event_properties={"org_name": layer.org_name, "layer_name": layer.name},
        )
        # is_auto: whether the config uses `image: AUTO` (push to opta-managed registry).
        is_auto = __check_layer_and_image(layer, image)
        layer.verify_cloud_credentials()
        layer.validate_required_path_dependencies()
        # If remote state exists, refuse to proceed while a terraform lock is held.
        if Terraform.download_state(layer):
            tf_lock_exists, _ = Terraform.tf_lock_details(layer)
            if tf_lock_exists:
                raise UserErrors(USER_ERROR_TF_LOCK)
        try:
            outputs = Terraform.get_outputs(layer)
        except MissingState:
            # First deploy: no remote state yet, so no outputs to read.
            outputs = {}
        image_digest, image_tag = (None, None)
        if is_auto:
            if "docker_repo_url" not in outputs or outputs["docker_repo_url"] == "":
                logger.info(
                    "Did not find docker repository in state, so applying once to create it before deployment"
                )
                # Bootstrap apply: create the container registry so the image
                # push below has a destination.
                _apply(
                    config=config,
                    env=env,
                    refresh=False,
                    image_tag=None,
                    test=False,
                    local=local,
                    auto_approve=auto_approve,
                    stdout_logs=False,
                    detailed_plan=detailed_plan,
                    input_variables=var,
                )
            if image is not None:
                image_digest, image_tag = push_image(
                    image=image,
                    config=config,
                    env=env,
                    tag=tag,
                    input_variables=var,
                )
        # NOTE(review): image_tag returned by push_image is never used -- the
        # apply below pins the deployment by image_digest and passes
        # image_tag=None. Confirm this is intended.
        _apply(
            config=config,
            env=env,
            refresh=False,
            image_tag=None,
            test=False,
            local=local,
            auto_approve=auto_approve,
            image_digest=image_digest,
            detailed_plan=detailed_plan,
            input_variables=var,
        )
    finally:
        opta_release_lock()