def handle_create(agw_client: APIGatewayClient, model: ResourceModel,
                  callback_context: MutableMapping[str, Any]) -> ProgressEvent:
    if callback_context.get("green-status") == CREATED:
        stage_name = "blue"
        final_state = OperationStatus.SUCCESS
    else:
        stage_name = "green"
        final_state = OperationStatus.IN_PROGRESS
    try:
        agw_client.create_stage(
            restApiId=model.RestApiId,
            stageName=stage_name,
            deploymentId=model.DeploymentId,
            description=f"{stage_name} production stage",
            cacheClusterEnabled=False,
            tracingEnabled=model.TracingEnabled,
        )
    except agw_client.exceptions.NotFoundException:
        return ProgressEvent(
            status=OperationStatus.FAILED,
            errorCode=HandlerErrorCode.NotFound,
            message="REST API does not exist",
        )
    model.StageName = "blue"
    callback_context[f"{stage_name}-status"] = CREATED
    return ProgressEvent(
        status=final_state,
        callbackContext=callback_context,
        resourceModel=model,
    )
def create_handler(
    session: Optional[SessionProxy],
    request: ResourceHandlerRequest,
    callback_context: MutableMapping[str, Any],
) -> ProgressEvent:
    # Typically the model is in request.desiredResourceState
    # model = request.desiredResourceState
    # Workaround to create a resource with no properties (and ignore any properties set)
    model = ResourceModel(ID='', Content='')
    progress: ProgressEvent = ProgressEvent(
        status=OperationStatus.IN_PROGRESS,
        resourceModel=model,
    )
    try:
        # Set up a random primary identifier compliant with the CloudFormation standard
        model.ID = identifier_utils.generate_resource_identifier(
            stack_id_or_name=request.stackId,
            logical_resource_id=request.logicalResourceIdentifier,
            client_request_token=request.clientRequestToken,
            max_length=255)
        # Setting status to SUCCESS signals to CloudFormation that the operation is complete
        # progress.status = OperationStatus.SUCCESS
    except TypeError as e:
        # The exceptions module lets CloudFormation know the type of failure that occurred
        raise exceptions.InternalFailure(f"was not expecting type {e}")
        # This can also be done by returning a failed progress event
        # return ProgressEvent.failed(HandlerErrorCode.InternalFailure, f"was not expecting type {e}")
    return ProgressEvent(status=OperationStatus.SUCCESS, resourceModel=model)
def pre_handler(_s, request: HookHandlerRequest, _c,
                type_configuration: TypeConfigurationModel) -> ProgressEvent:
    LOG.setLevel(logging.DEBUG)
    LOG.debug(f"request: {request.__dict__}")
    LOG.debug(
        f"type_configuration: {type_configuration.__dict__ if type_configuration else dict()}"
    )
    cfn_model = request.hookContext.targetModel.get("resourceProperties", {})

    # If we get a type that we don't care about, return InvalidRequest
    if request.hookContext.targetName not in supported_types:
        LOG.error("returning InvalidRequest")
        return ProgressEvent(
            status=OperationStatus.FAILED,
            errorCode=HandlerErrorCode.InvalidRequest,
            message=f"This hook only supports {supported_types}")

    # If EndpointPublicAccess is not defined, it defaults to True
    is_public = cfn_model.get("ResourcesVpcConfig", {}).get("EndpointPublicAccess", "true")
    # CloudFormation casts bools to strings, so cast it back
    is_public = str(is_public).lower() == "true"

    # Fail if the public endpoint is enabled
    if is_public:
        return non_compliant(
            "EKS Clusters cannot have public endpoint enabled. To disable, set the "
            "EndpointPublicAccess property of ResourcesVpcConfig to false")

    # Operation is compliant, return success
    LOG.debug("returning SUCCESS")
    return ProgressEvent(status=OperationStatus.SUCCESS)
def pre_handler(_s, request: HookHandlerRequest, _c,
                type_configuration: TypeConfigurationModel) -> ProgressEvent:
    LOG.setLevel(logging.DEBUG)
    LOG.debug(f"request: {request.__dict__}")
    LOG.debug(f"type_configuration: {type_configuration.__dict__ if type_configuration else dict()}")
    cfn_model = request.hookContext.targetModel.get("resourceProperties", {})
    cfn_type = request.hookContext.targetName

    # If we get a type that we don't care about, return InvalidRequest
    if cfn_type not in supported_types:
        LOG.error("returning InvalidRequest")
        return ProgressEvent(
            status=OperationStatus.FAILED,
            errorCode=HandlerErrorCode.InvalidRequest,
            message=f"This hook only supports {supported_types}"
        )

    if cfn_type == "AWS::EC2::SecurityGroup":
        security_groups = cfn_model.get("SecurityGroupIngress", [])
    else:
        security_groups = [cfn_model] if cfn_model else []

    # Fail if an open ingress rule is found
    if is_open(security_groups):
        return non_compliant(
            "Security Group cannot contain rules that allow all destinations (0.0.0.0/0 or ::/0)")

    # Operation is compliant, return success
    LOG.debug("returning SUCCESS")
    return ProgressEvent(status=OperationStatus.SUCCESS)
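# The is_open helper used above is not defined in these snippets; the following is a
# minimal sketch, assuming each ingress rule is a plain dict carrying CidrIp / CidrIpv6
# keys as it appears in the hook's resourceProperties payload.
def is_open(ingress_rules):
    """Return True if any ingress rule allows all destinations (0.0.0.0/0 or ::/0)."""
    open_cidrs = {"0.0.0.0/0", "::/0"}
    for rule in ingress_rules or []:
        if rule.get("CidrIp") in open_cidrs or rule.get("CidrIpv6") in open_cidrs:
            return True
    return False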
def update_handler(
    session: Optional[SessionProxy],
    request: ResourceHandlerRequest,
    callback_context: MutableMapping[str, Any],
) -> ProgressEvent:
    model = request.desiredResourceState
    progress: ProgressEvent = ProgressEvent(
        status=OperationStatus.IN_PROGRESS,
        resourceModel=model,
    )
    LOG.info(f"Starting the {TYPE_NAME} Update Handler")
    downtime_body = build_downtime_struct(model)
    with v1_client(
        model.DatadogCredentials.ApiKey,
        model.DatadogCredentials.ApplicationKey,
        model.DatadogCredentials.ApiURL,
        TELEMETRY_TYPE_NAME,
        __version__,
    ) as api_client:
        api_instance = downtimes_api.DowntimesApi(api_client)
        try:
            api_instance.update_downtime(model.Id, downtime_body)
        except ApiException as e:
            LOG.error("Exception when calling DowntimesApi->update_downtime: %s\n" % e)
            return ProgressEvent(status=OperationStatus.FAILED,
                                 resourceModel=model,
                                 message=e.body)
    return read_handler(session, request, callback_context)
def read_handler(
    session: Optional[SessionProxy],
    request: ResourceHandlerRequest,
    callback_context: MutableMapping[str, Any],
) -> ProgressEvent:
    LOG.info("Starting read_handler")
    model = request.desiredResourceState
    try:
        ssm_parameter_action(ssm_action_get, session, request.stackId,
                             const_key_instance_id, model.Id)
        model.IP = ssm_parameter_action(ssm_action_get, session, request.stackId, const_key_IP)
        model.URL = ssm_parameter_action(ssm_action_get, session, request.stackId, const_key_URL)
        progress = ProgressEvent(status=OperationStatus.SUCCESS, resourceModel=model)
    except Exception:
        progress = ProgressEvent(status=OperationStatus.FAILED,
                                 errorCode=HandlerErrorCode.NotFound)
    LOG.info(f"Exiting read_handler with code {progress.status}")
    return progress
def delete_handler(
    session: Optional[SessionProxy],
    request: ResourceHandlerRequest,
    callback_context: MutableMapping[str, Any],
) -> ProgressEvent:
    model = request.desiredResourceState
    aws_account = build_aws_account_from_model(model)
    with v1_client(
        model.DatadogCredentials.ApiKey,
        model.DatadogCredentials.ApplicationKey,
        model.DatadogCredentials.ApiURL,
        TELEMETRY_TYPE_NAME,
        __version__,
    ) as api_client:
        api_instance = AWSIntegrationApi(api_client)
        try:
            api_instance.delete_aws_account(aws_account)
        except ApiException as e:
            LOG.error("Exception when calling AWSIntegrationApi->delete_aws_account: %s\n", e)
            return ProgressEvent(status=OperationStatus.FAILED,
                                 resourceModel=model,
                                 message=f"Error deleting AWS account: {e}")
    return ProgressEvent(
        status=OperationStatus.SUCCESS,
        resourceModel=None,
    )
def update_handler(
    session: Optional[SessionProxy],
    request: ResourceHandlerRequest,
    callback_context: MutableMapping[str, Any],
) -> ProgressEvent:
    model = request.desiredResourceState
    progress: ProgressEvent = ProgressEvent(
        status=OperationStatus.IN_PROGRESS,
        resourceModel=model,
    )
    LOG.info(f"Starting the {TYPE_NAME} Update Handler")
    with v1_client(
        model.DatadogCredentials.ApiKey,
        model.DatadogCredentials.ApplicationKey,
        model.DatadogCredentials.ApiURL or "https://api.datadoghq.com",
        TELEMETRY_TYPE_NAME,
        __version__,
    ) as api_client:
        api_instance = users_api.UsersApi(api_client)
        body = User(
            access_role=AccessRole(model.AccessRole),
            email=model.Email,
            disabled=model.Disabled or False,
            name=model.Name,
        )
        try:
            api_instance.update_user(model.Handle, body)
        except ApiException as e:
            LOG.error("Exception when calling UsersApi->update_user: %s\n" % e)
            return ProgressEvent(status=OperationStatus.FAILED,
                                 resourceModel=model,
                                 message=e.body)
    return read_handler(session, request, callback_context)
def delete_handler(
    session: Optional[SessionProxy],
    request: ResourceHandlerRequest,
    callback_context: MutableMapping[str, Any],
) -> ProgressEvent:
    LOG.info("Starting %s Delete Handler", TYPE_NAME)
    model = request.desiredResourceState
    type_configuration = request.typeConfiguration
    with v1_client(
        type_configuration.DatadogCredentials.ApiKey,
        type_configuration.DatadogCredentials.ApplicationKey,
        type_configuration.DatadogCredentials.ApiURL,
        TELEMETRY_TYPE_NAME,
        __version__,
    ) as api_client:
        api_instance = MonitorsApi(api_client)
        try:
            api_instance.delete_monitor(model.Id)
        except ApiException as e:
            LOG.exception("Exception when calling MonitorsApi->delete_monitor: %s\n", e)
            return ProgressEvent(
                status=OperationStatus.FAILED,
                resourceModel=model,
                message=f"Error deleting monitor: {e}",
                errorCode=http_to_handler_error_code(e.status),
            )
    return ProgressEvent(
        status=OperationStatus.SUCCESS,
        resourceModel=None,
    )
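# http_to_handler_error_code is used above and in the dashboard update handler but is not
# defined in these snippets; a minimal sketch, assuming a plain lookup from HTTP status
# codes to CloudFormation handler error codes (members of HandlerErrorCode from
# cloudformation-cli-python-lib).
def http_to_handler_error_code(http_status: int) -> HandlerErrorCode:
    mapping = {
        400: HandlerErrorCode.InvalidRequest,
        401: HandlerErrorCode.InvalidCredentials,
        403: HandlerErrorCode.AccessDenied,
        404: HandlerErrorCode.NotFound,
        429: HandlerErrorCode.Throttling,
    }
    # Anything unmapped is treated as a generic service-side failure
    return mapping.get(http_status, HandlerErrorCode.GeneralServiceException)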
def pre_update_handler(
        session: Optional[SessionProxy],
        request: BaseHookHandlerRequest,
        callback_context: MutableMapping[str, Any],
        type_configuration: TypeConfigurationModel) -> ProgressEvent:
    target_model = request.hookContext.targetModel
    progress: ProgressEvent = ProgressEvent(status=OperationStatus.IN_PROGRESS)
    target_name = request.hookContext.targetName
    try:
        LOG.debug("Hook context:")
        LOG.debug(request.hookContext)
        # Read the Resource Hook target's new properties
        resource_properties = target_model.get("resourceProperties")
        # Only need to check whether the new resource properties match the required TypeConfiguration.
        # This will block automatically if they are trying to remove a permission boundary.
        if "AWS::S3::Bucket" == target_name:
            progress = _validate_block_public_access(
                target_name, resource_properties,
                type_configuration.excludedBucketSuffixes)
        else:
            raise exceptions.InvalidRequest(f"Unknown target type: {target_name}")
    except exceptions.InvalidRequest:
        progress.status = OperationStatus.FAILED
        progress.message = f"Unknown target type: {target_name}"
    except BaseException as e:
        progress = ProgressEvent.failed(HandlerErrorCode.InternalFailure,
                                        f"Unexpected error {e}")
    return progress
def delete_handler(
    session: Optional[SessionProxy],
    request: ResourceHandlerRequest,
    callback_context: MutableMapping[str, Any],
) -> ProgressEvent:
    LOG.info("Starting %s Delete Handler", TYPE_NAME)
    model = request.desiredResourceState
    dashboard_id = model.Id
    with v1_client(
        model.DatadogCredentials.ApiKey,
        model.DatadogCredentials.ApplicationKey,
        model.DatadogCredentials.ApiURL,
        TELEMETRY_TYPE_NAME,
        __version__,
    ) as api_client:
        api_instance = DashboardsApi(api_client)
        try:
            api_instance.delete_dashboard(dashboard_id)
        except ApiException as e:
            LOG.error("Exception when calling DashboardsApi->delete_dashboard: %s\n", e)
            return ProgressEvent(status=OperationStatus.FAILED,
                                 resourceModel=model,
                                 message=f"Error deleting dashboard: {e}")
    return ProgressEvent(
        status=OperationStatus.SUCCESS,
        resourceModel=None,
    )
def read_handler(
    session: Optional[SessionProxy],
    request: ResourceHandlerRequest,
    callback_context: MutableMapping[str, Any],
) -> ProgressEvent:
    LOG.info("Starting %s Read Handler", TYPE_NAME)
    model = request.desiredResourceState
    dashboard_id = model.Id
    with v1_client(
        model.DatadogCredentials.ApiKey,
        model.DatadogCredentials.ApplicationKey,
        model.DatadogCredentials.ApiURL,
        TELEMETRY_TYPE_NAME,
        __version__,
    ) as api_client:
        api_instance = DashboardsApi(api_client)
        try:
            dash = api_instance.get_dashboard(dashboard_id)
            json_dict = ApiClient.sanitize_for_serialization(dash)
            model.DashboardDefinition = json.dumps(json_dict)
        except ApiException as e:
            LOG.error("Exception when calling DashboardsApi->get_dashboard: %s\n", e)
            return ProgressEvent(status=OperationStatus.FAILED,
                                 resourceModel=model,
                                 message=f"Error getting dashboard: {e}")
    return ProgressEvent(
        status=OperationStatus.SUCCESS,
        resourceModel=model,
    )
def pre_handler(_s, request: HookHandlerRequest, _c,
                type_configuration: TypeConfigurationModel) -> ProgressEvent:
    LOG.setLevel(logging.DEBUG)
    LOG.debug(f"request: {request.__dict__}")
    LOG.debug(
        f"type_configuration: {type_configuration.__dict__ if type_configuration else dict()}"
    )
    cfn_model = request.hookContext.targetModel.get("resourceProperties", {})

    # If we get a type that we don't care about, return InvalidRequest
    if request.hookContext.targetName not in supported_types:
        LOG.error("returning InvalidRequest")
        return ProgressEvent(
            status=OperationStatus.FAILED,
            errorCode=HandlerErrorCode.InvalidRequest,
            message=f"This hook only supports {supported_types}")

    # The object structure is deeply nested, so simplify it down to a set of strings
    defined_log_types = cfn_model.get('Logging', {}).get("ClusterLogging", {}).get("EnabledTypes", [])
    defined_log_types = {v['Type'] for v in defined_log_types}
    is_compliant = required_log_types.issubset(defined_log_types)

    # Fail if the required log types are not all in the defined set
    if not is_compliant:
        return non_compliant(f"EKS Clusters should have at least {required_log_types} set")

    # Operation is compliant, return success
    LOG.debug("returning SUCCESS")
    return ProgressEvent(status=OperationStatus.SUCCESS)
def put_event_type_and_return_progress(frauddetector_client, model, progress: ProgressEvent):
    try:
        if hasattr(model, "Tags"):
            tags = model_helpers.get_tags_from_tag_models(model.Tags)
            put_event_type_func = partial(
                api_helpers.call_put_event_type,
                frauddetector_client=frauddetector_client,
                event_type_tags=tags,
            )
        else:
            put_event_type_func = partial(
                api_helpers.call_put_event_type,
                frauddetector_client=frauddetector_client,
            )
        put_event_type_for_model(put_event_type_func, model)
        progress.resourceModel = model_helpers.get_event_type_and_return_model(
            frauddetector_client, model)
        progress.status = OperationStatus.SUCCESS
        LOG.debug(f"just finished a put event_type call for event type: {progress.resourceModel.Name}")
    except RuntimeError as e:
        raise exceptions.InternalFailure(f"Error occurred: {e}")
    LOG.info(f"Returning Progress with status: {progress.status}")
    return progress
def pre_create_handler(
        session: Optional[SessionProxy],
        request: HookHandlerRequest,
        callback_context: MutableMapping[str, Any],
        type_configuration: TypeConfigurationModel) -> ProgressEvent:
    progress: ProgressEvent = ProgressEvent(status=OperationStatus.IN_PROGRESS)
    target_name = request.hookContext.targetName
    try:
        LOG.debug("Hook context:")
        LOG.debug(request.hookContext)
        if "AWS::S3::Bucket" == target_name:
            progress = _validate_block_public_access(
                target_name,
                request.hookContext.targetModel.get("resourceProperties"),
                type_configuration.excludedBucketSuffixes)
        else:
            raise exceptions.InvalidRequest(f"Unknown target type: {target_name}")
    except exceptions.InvalidRequest:
        progress.status = OperationStatus.FAILED
        progress.message = f"Unknown target type: {target_name}"
    except BaseException as e:
        progress = ProgressEvent.failed(HandlerErrorCode.InternalFailure,
                                        f"Unexpected error {e}")
    return progress
def read_handler(
    session: Optional[SessionProxy],
    request: ResourceHandlerRequest,
    callback_context: MutableMapping[str, Any],
) -> ProgressEvent:
    model = request.desiredResourceState
    LOG.info(f"Starting the {TYPE_NAME} Read Handler")
    with v1_client(
        model.DatadogCredentials.ApiKey,
        model.DatadogCredentials.ApplicationKey,
        model.DatadogCredentials.ApiURL or "https://api.datadoghq.com",
        TELEMETRY_TYPE_NAME,
        __version__,
    ) as api_client:
        api_instance = users_api.UsersApi(api_client)
        user_handle = model.Handle
        try:
            api_response = api_instance.get_user(user_handle)
        except ApiException as e:
            LOG.error("Exception when calling UsersApi->get_user: %s\n" % e)
            return ProgressEvent(status=OperationStatus.FAILED,
                                 resourceModel=model,
                                 message=e.body)
    model.AccessRole = api_response.user.access_role.value
    model.Name = api_response.user.name
    model.Disabled = api_response.user.disabled
    model.Verified = api_response.user.verified
    model.Email = api_response.user.email
    return ProgressEvent(
        status=OperationStatus.SUCCESS,
        resourceModel=model,
    )
def delete_handler(
    session: Optional[SessionProxy],
    request: ResourceHandlerRequest,
    callback_context: MutableMapping[str, Any],
) -> ProgressEvent:
    model = request.desiredResourceState
    progress: ProgressEvent = ProgressEvent(
        status=OperationStatus.IN_PROGRESS,
        resourceModel=model,
    )
    LOG.info(f"Starting the {TYPE_NAME} Delete Handler")
    with v1_client(
        model.DatadogCredentials.ApiKey,
        model.DatadogCredentials.ApplicationKey,
        model.DatadogCredentials.ApiURL or "https://api.datadoghq.com",
        TELEMETRY_TYPE_NAME,
        __version__,
    ) as api_client:
        api_instance = users_api.UsersApi(api_client)
        user_handle = model.Handle
        try:
            api_instance.disable_user(user_handle)
        except ApiException as e:
            LOG.error("Exception when calling UsersApi->disable_user: %s\n" % e)
            return ProgressEvent(status=OperationStatus.FAILED,
                                 resourceModel=model,
                                 message=e.body)
    return ProgressEvent(
        status=OperationStatus.SUCCESS,
        resourceModel=None,
    )
def update_handler(
    session: Optional[SessionProxy],
    request: ResourceHandlerRequest,
    callback_context: MutableMapping[str, Any],
) -> ProgressEvent:
    LOG.info("Starting %s Update Handler", TYPE_NAME)
    model = request.desiredResourceState
    type_configuration = request.typeConfiguration
    try:
        json_payload = json.loads(model.DashboardDefinition)
    except ValueError as e:
        LOG.exception("Exception parsing dashboard payload: %s\n", e)
        return ProgressEvent(
            status=OperationStatus.FAILED,
            resourceModel=model,
            message=f"Error parsing dashboard payload: {e}",
            errorCode=HandlerErrorCode.InternalFailure,
        )
    dashboard_id = model.Id
    with v1_client(
        type_configuration.DatadogCredentials.ApiKey,
        type_configuration.DatadogCredentials.ApplicationKey,
        type_configuration.DatadogCredentials.ApiURL,
        TELEMETRY_TYPE_NAME,
        __version__,
    ) as api_client:
        api_instance = DashboardsApi(api_client)
        try:
            # Get the raw HTTP response by setting _preload_content to False
            api_instance.update_dashboard(dashboard_id, json_payload,
                                          _check_input_type=False,
                                          _preload_content=False)
        except TypeError as e:
            LOG.exception("Exception when deserializing the Dashboard payload definition: %s\n", e)
            return ProgressEvent(
                status=OperationStatus.FAILED,
                resourceModel=model,
                message=f"Error deserializing dashboard: {e}",
                errorCode=HandlerErrorCode.InternalFailure,
            )
        except ApiException as e:
            LOG.exception("Exception when calling DashboardsApi->update_dashboard: %s\n", e)
            return ProgressEvent(status=OperationStatus.FAILED,
                                 resourceModel=model,
                                 message=f"Error updating dashboard: {e}",
                                 errorCode=http_to_handler_error_code(e.status))
    return read_handler(session, request, callback_context)
def create_handler(
    session: Optional[SessionProxy],
    request: ResourceHandlerRequest,
    callback_context: MutableMapping[str, Any],
) -> ProgressEvent:
    LOG.info("Starting %s Create Handler", TYPE_NAME)
    model = request.desiredResourceState
    try:
        json_payload = json.loads(model.DashboardDefinition)
    except json.JSONDecodeError as e:
        LOG.error("Exception when loading the Dashboard JSON definition: %s\n", e)
        return ProgressEvent(
            status=OperationStatus.FAILED,
            resourceModel=model,
            message=f"Error loading Dashboard JSON definition: {e}")
    with v1_client(
        model.DatadogCredentials.ApiKey,
        model.DatadogCredentials.ApplicationKey,
        model.DatadogCredentials.ApiURL,
        TELEMETRY_TYPE_NAME,
        __version__,
    ) as api_client:
        api_instance = DashboardsApi(api_client)
        try:
            # Call the deserialization function of the Python client.
            # It expects the loaded JSON payload, the Python client type of the model,
            # a path to the data (not sure what this one does),
            # whether or not the payload is a server payload (true in our case),
            # whether or not to do type conversion (also true in our case),
            # and importantly the api_client configuration, needed to perform the type conversions.
            dashboard = validate_and_convert_types(
                json_payload, (Dashboard,), ["resource_data"], True, True,
                configuration=api_client.configuration)
            res = api_instance.create_dashboard(dashboard)
            model.Id = res.id
        except TypeError as e:
            LOG.error("Exception when deserializing the Dashboard payload definition: %s\n", e)
            return ProgressEvent(status=OperationStatus.FAILED,
                                 resourceModel=model,
                                 message=f"Error deserializing dashboard: {e}")
        except ApiException as e:
            LOG.error("Exception when calling DashboardsApi->create_dashboard: %s\n", e)
            return ProgressEvent(status=OperationStatus.FAILED,
                                 resourceModel=model,
                                 message=f"Error creating dashboard: {e}")
    return read_handler(session, request, callback_context)
def execute_list_detector_handler_work(session: SessionProxy, model: models.ResourceModel,
                                       progress: ProgressEvent):
    afd_client = client_helpers.get_afd_client(session)
    try:
        detector_models = list_worker_helpers.list_detector_models(afd_client)
    except RuntimeError as e:
        raise exceptions.InternalFailure(f"Error occurred: {e}")
    progress.resourceModels = detector_models
    progress.status = OperationStatus.SUCCESS
    LOG.info(f"Returning Progress with status: {progress.status}")
    return progress
def handle_delete(agw_client: APIGatewayClient, model: ResourceModel,
                  callback_context: MutableMapping[str, Any]) -> ProgressEvent:
    if callback_context.get("blue-status") is None:
        agw_client.delete_stage(restApiId=model.RestApiId, stageName="blue")
        callback_context["blue-status"] = DELETED
        return ProgressEvent(
            status=OperationStatus.IN_PROGRESS,
            resourceModel=model,
            callbackContext=callback_context,
        )
    else:
        agw_client.delete_stage(restApiId=model.RestApiId, stageName="green")
        return ProgressEvent(status=OperationStatus.SUCCESS)
def create_handler(
    session: Optional[SessionProxy],
    request: ResourceHandlerRequest,
    callback_context: MutableMapping[str, Any],
) -> ProgressEvent:
    model = request.desiredResourceState
    progress: ProgressEvent = ProgressEvent(
        status=OperationStatus.IN_PROGRESS,
        resourceModel=model,
        callbackContext=callback_context if callback_context is not None else {}
    )
    try:
        req_payload = {
            'S3TrainingDataPath': model.S3TrainingDataPath,
            'TargetColumnName': model.TargetColumnName,
            'NotificationEmail': model.NotificationEmail,
            'WorkflowName': model.WorkflowName,
        }
        # auth = HTTPBasicAuth('API_KEY', '')
        LOG.info(f"Creating workflow {model.WorkflowName}")

        # Resuming from a long CREATE operation
        if 'DEPLOY_ID' in callback_context:
            req_payload['DeployId'] = callback_context['DEPLOY_ID']

        req = http.request(
            'POST',
            url=CD4AUTO_ML_API,
            headers=HTTP_REQUEST_HEADER,
            body=json.dumps(req_payload)
        )
        payload = json.loads(req.data)
        deploy_status = payload['DeployStatus']

        progress.callbackContext['DEPLOY_ID'] = payload['DeployId']
        progress.callbackDelaySeconds = 20
        progress.status = OperationStatus.IN_PROGRESS

        if deploy_status in ('FAILED', 'FAULT', 'STOPPED', 'TIMED_OUT'):
            LOG.error(f"Workflow {model.WorkflowName} creation failed with status {deploy_status}")
            progress.status = OperationStatus.FAILED
        elif deploy_status == 'IN_PROGRESS':
            model.InferenceApi = payload.get('ApiUri', 'MyTestUrl')
            LOG.info(f"Workflow {model.WorkflowName} creation is still in progress")
            progress.status = OperationStatus.IN_PROGRESS
        else:
            progress.status = OperationStatus.SUCCESS
        return progress
    except TypeError as e:
        # The exceptions module lets CloudFormation know the type of failure that occurred
        LOG.error(f"Workflow creation failed with error {e}")
        raise exceptions.InternalFailure(f"was not expecting type {e}")
        # This can also be done by returning a failed progress event
        # return ProgressEvent.failed(HandlerErrorCode.InvalidRequest, f"was not expecting type {e}")
    return ProgressEvent(status=OperationStatus.SUCCESS,
                         resourceModel=model,
                         message="Workflow created")
def read_handler(
    session: Optional[SessionProxy],
    request: ResourceHandlerRequest,
    callback_context: MutableMapping[str, Any],
) -> ProgressEvent:
    model = request.desiredResourceState
    LOG.info(f"Starting the {TYPE_NAME} Read Handler")
    with v1_client(
        model.DatadogCredentials.ApiKey,
        model.DatadogCredentials.ApplicationKey,
        model.DatadogCredentials.ApiURL,
        TELEMETRY_TYPE_NAME,
        __version__,
    ) as api_client:
        api_instance = downtimes_api.DowntimesApi(api_client)
        try:
            api_resp = api_instance.get_downtime(model.Id)
        except ApiException as e:
            LOG.error("Exception when calling DowntimesApi->get_downtime: %s\n" % e)
            return ProgressEvent(status=OperationStatus.FAILED,
                                 resourceModel=model,
                                 message=e.body)
    LOG.info(f"Success retrieving downtime {api_resp}")

    # Use hasattr checks for non-nullable fields to ensure they are available to be set.
    # Currently in datadog-api-client-python, accessing fields that don't exist raises an AttributeError.
    if hasattr(api_resp, 'message'):
        model.Message = api_resp.message
    if hasattr(api_resp, 'monitor_tags'):
        model.MonitorTags = api_resp.monitor_tags
    if hasattr(api_resp, 'scope'):
        model.Scope = api_resp.scope
    if hasattr(api_resp, 'timezone'):
        model.Timezone = api_resp.timezone
    if hasattr(api_resp, 'start'):
        model.Start = api_resp.start

    # Nullable fields: these should be None or set as a value
    if api_resp.end:
        model.End = api_resp.end
    if api_resp.monitor_id:
        model.MonitorId = api_resp.monitor_id

    return ProgressEvent(
        status=OperationStatus.SUCCESS,
        resourceModel=model,
    )
def execute_read_detector_handler_work(session: SessionProxy, model: models.ResourceModel,
                                       progress: ProgressEvent):
    afd_client = client_helpers.get_afd_client(session)
    # Read requests only include the primary identifier (Arn). Extract DetectorId from the Arn.
    if not model.DetectorId:
        model.DetectorId = model.Arn.split("/")[-1]
    model = read_worker_helpers.validate_detector_exists_and_return_detector_resource_model(
        afd_client, model)
    progress.resourceModel = model
    progress.status = OperationStatus.SUCCESS
    LOG.info(f"Returning Progress with status: {progress.status}")
    return progress
def delete_handler(
    session: Optional[SessionProxy],
    request: ResourceHandlerRequest,
    callback_context: MutableMapping[str, Any],
) -> ProgressEvent:
    """
    [DELETE] Handler

    :param session: Boto SessionProxy
    :param request: Resource handler request containing the resource model
    :param callback_context: Mapping set when the handler returns status=IN_PROGRESS
        and needs more processing.
    :return ProgressEvent:
    """
    model = request.desiredResourceState
    LOG.info('[DELETE] Entering DELETE handler')
    model.OpenShiftInstallBinary = model.OpenShiftInstallBinary or DEFAULT_INSTALL_BINARY
    model.OpenShiftVersion = model.OpenShiftVersion or DEFAULT_VERSION
    model.OpenShiftMirrorURL = model.OpenShiftMirrorURL or DEFAULT_MIRROR_URL
    model.OpenShiftClientBinary = model.OpenShiftClientBinary or DEFAULT_CLIENT_BINARY
    LOG.setLevel(model.LogLevel or "INFO")
    LOG.debug('[DELETE] Current model state %s', model)

    read_kwargs = (fetch_resource(model, session)
                   if model.Action == 'BOOTSTRAP'
                   else fetch_kube_parameters(model, session))
    if read_kwargs['status'] == OperationStatus.FAILED:
        return ProgressEvent(**read_kwargs)
    model = read_kwargs['resourceModel']

    try:
        if model.Action == 'BOOTSTRAP':
            event_kwargs = bootstrap_delete(model, session)
        elif model.Action == 'GENERATE_IGNITION':
            event_kwargs = generate_ignition_delete(model, session)
        else:
            raise AttributeError(f'Action: {model.Action} is not a valid action')
        return ProgressEvent(**event_kwargs)
    except AttributeError as e:
        return ProgressEvent(
            status=OperationStatus.FAILED,
            errorCode=HandlerErrorCode.InvalidRequest,
            message=f"Operation failed because the parameters were invalid: {e}"
        )
    except BaseException as e:
        return ProgressEvent(
            status=OperationStatus.FAILED,
            errorCode=HandlerErrorCode.InternalFailure,
            message=f"Operation failed due to an internal problem: {e}. Check the logs for more information"
        )
def create_handler(
    session: Optional[SessionProxy],
    request: ResourceHandlerRequest,
    callback_context: MutableMapping[str, Any],
) -> ProgressEvent:
    model = request.desiredResourceState
    progress: ProgressEvent = ProgressEvent(
        status=OperationStatus.IN_PROGRESS,
        resourceModel=model,
    )
    ensure_tracking_table_exists(session)
    execution_id = str(uuid.uuid4())
    model.ExecutionId = execution_id
    try:
        execute_changes(session, model, {})
        ddbclient = session.client('dynamodb')
        ddbclient.put_item(
            TableName=TRACKING_TABLE_NAME,
            Item={
                'executionId': {'S': execution_id},
                'lastUpdated': {'N': str(int(time.time()))},
                'clusterArn': {'S': model.ClusterArn},
                'sqlHistory': generate_sql_history_block(model),
            })
    except Exception as e:
        raise exceptions.InternalFailure(f"{e}")
    model.ClusterArn = None
    model.SecretArn = None
    model.Databases = None
    model.SQL = None
    model.Users = None
    model.SQLIdempotency = None
    return ProgressEvent(
        status=OperationStatus.SUCCESS,
        resourceModel=model,
    )
def create_and_attach_instance_profile(obj: ProvisioningStatus,
                                       request: ResourceHandlerRequest,
                                       callback_context: MutableMapping[str, Any],
                                       session: SessionProxy):
    LOG.info(
        "starting ATTACH_INSTANCE_PROFILE with callback_context\n{}\nand request\n{}".format(
            callback_context, request))
    progress: ProgressEvent = ProgressEvent(
        status=OperationStatus.IN_PROGRESS,
        resourceModel=request.desiredResourceState,
        callbackContext=callback_context,
        callbackDelaySeconds=30)
    instance_profile_arn = callback_context["INSTANCE_PROFILE_ARN"]
    instance_id = callback_context["INSTANCE_ID"]
    try:
        ec2_client = session.client('ec2')
        associate_profile_response = ec2_client.associate_iam_instance_profile(
            IamInstanceProfile={'Arn': instance_profile_arn},
            InstanceId=instance_id)
        callback_context["ASSOCIATION_ID"] = associate_profile_response[
            'IamInstanceProfileAssociation']['AssociationId']
        progress.callbackContext["LOCAL_STATUS"] = ProfileAttached()
    except Exception as e:
        LOG.error("Error attaching instance profile: {}".format(e))
    LOG.info("returning progress from ATTACH_INSTANCE_PROFILE {}".format(progress))
    return progress
def create(obj: ProvisioningStatus, request: ResourceHandlerRequest,
           callback_context: MutableMapping[str, Any], session: SessionProxy):
    LOG.info("starting NEW RESOURCE with request\n{}".format(request))
    progress: ProgressEvent = ProgressEvent(
        status=OperationStatus.IN_PROGRESS,
        resourceModel=request.desiredResourceState,
        callbackContext=callback_context,
        callbackDelaySeconds=15)
    env_name = get_name_from_request(request)
    if request.desiredResourceState.OwnerArn:
        owner_arn = request.desiredResourceState.OwnerArn
    else:
        owner_arn = "arn:aws:iam::{}:root".format(
            session.client('sts').get_caller_identity()['Account'])
    response = session.client('cloud9').create_environment_ec2(
        ownerArn=owner_arn,
        name=env_name,
        instanceType=request.desiredResourceState.InstanceType,
        automaticStopTimeMinutes=30)
    LOG.info("environment id: {}".format(response['environmentId']))
    model: ResourceModel = request.desiredResourceState
    model.Name = env_name
    model.EnvironmentId = response['environmentId']
    progress.callbackContext["ENVIRONMENT_NAME"] = env_name
    progress.callbackContext["ENVIRONMENT_ID"] = response['environmentId']
    progress.callbackContext["LOCAL_STATUS"] = EnvironmentCreated()
    progress.status = OperationStatus.IN_PROGRESS
    progress.message = "Cloud9 Environment created"
    return progress
def handle_A(obj: ProvisioningStatus, request: ResourceHandlerRequest,
             callback_context: MutableMapping[str, Any], session: SessionProxy):
    LOG.info(
        "starting INSTANCE_STABLE with callback_context\n{}\nand request\n{}".format(
            callback_context, request))
    progress: ProgressEvent = ProgressEvent(
        status=OperationStatus.IN_PROGRESS,
        resourceModel=request.desiredResourceState,
        callbackContext=callback_context,
        callbackDelaySeconds=0)
    instance_id = callback_context["INSTANCE_ID"]
    try:
        ec2_client = session.client("ec2")
        if request.desiredResourceState.EBSVolumeSize:
            resize_ebs(instance_id,
                       int(request.desiredResourceState.EBSVolumeSize),
                       ec2_client)
        progress.callbackContext["LOCAL_STATUS"] = ResizedInstance()
        progress.message = "Resized EBS Volume"
    except Exception as e:
        LOG.info("Can't resize instance: {}".format(e))
        raise
    LOG.info("returning progress from INSTANCE_STABLE {}".format(progress))
    return progress
def non_compliant(msg):
    LOG.debug(f"returning FAILED: {HandlerErrorCode.NonCompliant} {msg}")
    return ProgressEvent(
        status=OperationStatus.FAILED,
        errorCode=HandlerErrorCode.NonCompliant,
        message=msg
    )