def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id)

    # IAM Group
    hands_on_cdk_developers = iam.Group(self,
                                        "HandsOnCdkDevelopers",
                                        group_name="HandsOnCdkDevelopers")
    groups_map = {"HandsOnCdk": hands_on_cdk_developers}

    # IAM User
    for user in read_config_users():
        iam.User(
            self,
            f"{user['group']}Developer-{user['first_name']}{user['last_name']}",
            user_name=f"{user['group']}Developer-{user['first_name']}{user['last_name']}",
            password=core.SecretValue.plain_text(user['password']),
            groups=[groups_map[user["group"]]])

    # IAM Role
    iam.Role(self,
             "HandsOnCdkDevelopers-Role-PowerUserAccess",
             role_name="HandsOnCdkDevelopers-Role-PowerUserAccess",
             assumed_by=iam.AccountPrincipal(
                 core.ScopedAws(scope).account_id),
             managed_policies=[
                 iam.ManagedPolicy.from_aws_managed_policy_name(
                     "PowerUserAccess")
             ])

    # IAM Policy
    iam.Policy(
        self,
        "HandsOnCdkDevelopers-Policy-SourceMfaRestriction",
        policy_name="HandsOnCdkDevelopers-Policy-SourceMfaRestriction",
        force=True,
        groups=[hands_on_cdk_developers],
        statements=[
            statement for statement in read_config_source_mfa_restriction()
        ])
    iam.Policy(self,
               "HandsOnCdkDevelopers-Policy-OnlySwitchRole",
               policy_name="HandsOnCdkDevelopers-Policy-OnlySwitchRole",
               force=True,
               groups=[hands_on_cdk_developers],
               statements=[
                   statement for statement in read_config_only_switch_role()
               ])
def add_policy(self):
    """
    Policy that allows DMS to save to S3
    """
    policy = iam.Policy(
        self,
        id=f"iam-{self.deploy_env.value}-data-lake-raw-dms-policy",
        policy_name=f"iam-{self.deploy_env.value}-data-lake-raw-dms-policy",
        statements=[
            iam.PolicyStatement(
                actions=[
                    "s3:PutObject",
                    "s3:DeleteObject",
                    "s3:ListBucket",
                ],
                resources=[
                    self.data_lake_bronze_bucket.bucket_arn,
                    f"{self.data_lake_bronze_bucket.bucket_arn}/*",
                ],
            )
        ],
    )
    self.attach_inline_policy(policy)
    return policy
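# Usage sketch (an assumption, not from the source): `add_policy` calls
# `self.attach_inline_policy`, so the enclosing class is presumably an
# `iam.Role` subclass assumable by DMS. A minimal hypothetical shape,
# mirroring the attribute-then-super() pattern used elsewhere in this file:
class RawDmsRole(iam.Role):
    def __init__(self, scope, deploy_env, data_lake_bronze_bucket) -> None:
        self.deploy_env = deploy_env
        self.data_lake_bronze_bucket = data_lake_bronze_bucket
        super().__init__(
            scope,
            id=f"iam-{deploy_env.value}-data-lake-raw-dms-role",
            assumed_by=iam.ServicePrincipal("dms.amazonaws.com"),
        )
        # attach the S3 write policy defined above
        self.add_policy()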
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    user = Iam.User(self, "brev-user")

    policy_json = get_role_policy_json()
    policy_document = Iam.PolicyDocument.from_json(policy_json)
    role = Iam.Role(
        self,
        "brev-role",
        inline_policies={"brev-policy": policy_document},
        assumed_by=Iam.ServicePrincipal("sagemaker.amazonaws.com"))

    policy_statements = get_user_policy_statements()
    allow_role_statement = Iam.PolicyStatement(
        actions=["iam:GetRole", "iam:PassRole"],
        effect=Iam.Effect.ALLOW,
        resources=[role.role_arn])
    policy_statements.append(allow_role_statement)
    policy = Iam.Policy(self, "brev-policy", statements=policy_statements)
    policy.attach_to_user(user)

    access_key = Iam.CfnAccessKey(self, 'brev_access_key',
                                  user_name=user.user_name)

    core.CfnOutput(self, "access_key_id", value=access_key.ref)
    core.CfnOutput(self, "secret_access_key",
                   value=access_key.attr_secret_access_key)
    core.CfnOutput(self, "iam_role", value=role.role_name)
def forecast_access_policy(self, name):
    policy = iam.Policy(
        self.owner,
        f"{name}ForecastAccess",
        statements=[
            iam.PolicyStatement(
                actions=[
                    "forecast:Describe*",
                    "forecast:List*",
                    "forecast:Create*",
                    "forecast:Update*",
                    "forecast:TagResource",
                ],
                resources=["*"],
            )
        ],
    )
    add_cfn_nag_suppressions(
        policy.node.default_child,
        [
            CfnNagSuppression(
                "W12",
                "Require access to all resources; Not all Amazon Forecast resources support resource based policy",
            )
        ],
    )
    return policy
def generate_codebuild_policy(scope, db_secret_arn):
    return iam.Policy(
        scope=scope,
        id="JVSANTOSTier1CodebuildPolicy",
        policy_name="JVSANTOS-codebuild-policy",
        statements=[
            PolicyStatement(actions=["secretsmanager:GetSecretValue"],
                            resources=[db_secret_arn],
                            effect=Effect.ALLOW),
            PolicyStatement(actions=[
                "logs:CreateLogGroup", "logs:CreateLogStream",
                "logs:PutLogEvents"
            ],
                            resources=["*"],
                            effect=Effect.ALLOW),
            PolicyStatement(actions=[
                "codebuild:CreateReportGroup", "codebuild:CreateReport",
                "codebuild:UpdateReport", "codebuild:BatchPutTestCases",
                "codebuild:BatchPutCodeCoverages"
            ],
                            resources=["*"],
                            effect=Effect.ALLOW),
            PolicyStatement(actions=[
                "s3:GetObject*", "s3:GetBucket*", "s3:List*",
                "s3:DeleteObject*", "s3:PutObject", "s3:Abort*"
            ],
                            resources=["*"],
                            effect=Effect.ALLOW),
            PolicyStatement(actions=["codestar-connections:UseConnection"],
                            resources=["*"],
                            effect=Effect.ALLOW)
        ])
def base_service_role(construct, resource_name: str, principal_resource: str,
                      actions: list, resources: list):
    """
    Generates an IAM Service Role with an attached IAM Policy.
    :param construct: Custom construct that will use this function. From the calling construct this is usually 'self'.
    :param resource_name: Name of the resource, used for naming purposes.
    :param principal_resource: Resource used to define a Service Principal. Has to match an AWS resource; for example, 'iot' -> 'iot.amazonaws.com'.
    :param actions: List of AWS IAM-defined actions, for example 'sns:Publish'.
    :param resources: List of AWS-defined resource ARNs.
    :return: IAM Service Role with an IAM Policy attached.
    """
    try:
        # Defining names for the IAM Role and IAM Policy
        iam_role_name = construct.prefix + "_role_" + resource_name + "_" + construct.environment_
        iam_policy_name = construct.prefix + "_policy_" + resource_name + "_" + construct.environment_

        # Defining Service Principal
        principal = iam.ServicePrincipal(service=f"{principal_resource}.amazonaws.com")

        # Defining IAM Role
        role = iam.Role(construct, id=iam_role_name, role_name=iam_role_name, assumed_by=principal)

        # Defining Policy Statement, Policy and attaching it to the Role
        policy_statements = iam.PolicyStatement(actions=actions, resources=resources)
        policy = iam.Policy(construct, id=iam_policy_name, policy_name=iam_policy_name, statements=[policy_statements])
        policy.attach_to_role(role=role)

    except Exception:
        print(traceback.format_exc())
    else:
        return role
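# Usage sketch (hypothetical values; assumes the calling construct defines the
# `prefix` and `environment_` attributes that the name-building above requires):
iot_role = base_service_role(
    construct=self,
    resource_name="telemetry",
    principal_resource="iot",  # -> iot.amazonaws.com
    actions=["sns:Publish"],
    resources=["arn:aws:sns:us-east-1:123456789012:telemetry-topic"],
)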
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    self._notebook_role = iam_.Role(
        self,
        "notebookAccessRole",
        role_name="notebookAccessRole",
        # the service principal needs the full suffix; 'sagemaker' alone is not valid
        assumed_by=iam_.ServicePrincipal('sagemaker.amazonaws.com'))

    # attach_to_role() returns None, so build the policy first rather than
    # assigning the result of the chained call
    self._notebook_policy = iam_.Policy(
        self,
        "notebookAccessPolicy",
        policy_name="notebookAccessPolicy",
        statements=[
            iam_.PolicyStatement(actions=['s3:*'], resources=['*']),
            iam_.PolicyStatement(actions=["logs:*"], resources=['*']),
            iam_.PolicyStatement(actions=["sagemaker:*"], resources=['*']),
            iam_.PolicyStatement(actions=["ecr:*"], resources=['*']),
            iam_.PolicyStatement(actions=[
                "iam:GetRole", "iam:PassRole", "s3:ListBucket",
                "s3:PutObject", "s3:GetObject", "s3:DeleteObject",
                "sts:GetSessionToken", "sts:GetAccessKeyInfo",
                "sts:GetCallerIdentity", "sts:GetServiceBearerToken",
                "sts:DecodeAuthorizationMessage", "sts:AssumeRole"
            ],
                                 resources=['*'])
        ])
    self._notebook_policy.attach_to_role(self._notebook_role)
def add_policy(self):
    policy = iam.Policy(
        self,
        id=f"iam-{self.deploy_env.value}-data-lake-raw-firehose-policy",
        policy_name=f"iam-{self.deploy_env.value}-data-lake-raw-firehose-policy",
        statements=[
            iam.PolicyStatement(
                actions=[
                    "s3:AbortMultipartUpload",
                    "s3:GetBucketLocation",
                    "s3:GetObject",
                    "s3:ListBucket",
                    "s3:ListBucketMultipartUploads",
                    "s3:PutObject",
                ],
                resources=[
                    self.data_lake_raw_bucket.bucket_arn,
                    f"{self.data_lake_raw_bucket.bucket_arn}/*",
                ],
            )
        ],
    )
    self.attach_inline_policy(policy)
    return policy
def _create_roles_for_set_experiment_info_env_step(self):
    """
    Create Roles for Set Experiment Info Env Step
    """
    self.lambda_experiment_info_role = iam.Role(
        self,
        "LambdaExperimentInfoRole",
        assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
        description="Role for Lambda to get experiment info from S3.",
        role_name=f"{self.name_prefix}-lambda-experiment-info-role",
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonS3ReadOnlyAccess")
        ],
    )
    lambda_experiment_info_policy = iam.Policy(
        self, "LambdaExperimentInfoPolicy")
    lambda_experiment_info_policy.add_statements(
        iam.PolicyStatement(effect=iam.Effect.ALLOW,
                            actions=[
                                "logs:CreateLogGroup",
                                "logs:CreateLogStream",
                                "logs:PutLogEvents"
                            ],
                            resources=["*"]))
    self.lambda_experiment_info_role.attach_inline_policy(
        lambda_experiment_info_policy)
def __init__(self, scope: core.Construct, id: str, buildspec, **kwargs):
    super().__init__(scope, id, **kwargs)
    self.buildspec = buildspec
    self.build_image = codebuild.LinuxBuildImage.STANDARD_2_0
    self.project = codebuild.PipelineProject(
        self,
        "Project",
        environment=codebuild.BuildEnvironment(
            build_image=self.build_image, privileged=True),
        build_spec=codebuild.BuildSpec.from_source_filename(self.buildspec),
        environment_variables={
            'REPO_NAME': codebuild.BuildEnvironmentVariable(
                value=config['CODEPIPELINE']['GITHUB_REPO'])
        },
    )
    # TODO: Don't need admin, let's make this least privilege
    self.admin_policy = iam.Policy(
        self,
        "AdminPolicy",
        roles=[self.project.role],
        statements=[iam.PolicyStatement(
            actions=['*'],
            resources=['*'],
        )])
def sagemaker_logs_metrics_policy_document(scope, id):
    policy = iam.Policy(
        scope,
        id,
        statements=[
            iam.PolicyStatement(
                actions=[
                    "logs:CreateLogGroup",
                    "logs:CreateLogStream",
                    "logs:DescribeLogStreams",
                    "logs:GetLogEvents",
                    "logs:PutLogEvents",
                ],
                resources=[
                    f"arn:{core.Aws.PARTITION}:logs:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:log-group:/aws/sagemaker/*"
                ],
            ),
            iam.PolicyStatement(
                actions=[
                    "cloudwatch:PutMetricData",
                ],
                resources=["*"],
            ),
        ],
    )
    policy.node.default_child.cfn_options.metadata = suppress_cloudwatch_policy()
    return policy
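# Usage sketch (hypothetical names; assumes this helper is called from a stack
# or construct scope): attach the returned policy to a SageMaker execution role.
sagemaker_role = iam.Role(
    self, "SageMakerExecutionRole",
    assumed_by=iam.ServicePrincipal("sagemaker.amazonaws.com"))
logs_policy = sagemaker_logs_metrics_policy_document(self, "LogsMetricsPolicy")
logs_policy.attach_to_role(sagemaker_role)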
def add_policy(self):
    policy = iam.Policy(
        self,
        id=f"iam-{self.deploy_env.value}-glue-data-lake-{self.layer.value}-policy",
        policy_name=f"iam-{self.deploy_env.value}-glue-data-lake-{self.layer.value}-policy",
        statements=[
            iam.PolicyStatement(
                actions=["s3:ListBucket", "s3:GetObject", "s3:PutObject"],
                resources=[self.bucket_arn, f"{self.bucket_arn}/*"],
            ),
            iam.PolicyStatement(
                actions=["cloudwatch:PutMetricData"],
                resources=["arn:aws:cloudwatch:*"],
            ),
            iam.PolicyStatement(actions=["glue:*"],
                                resources=["arn:aws:glue:*"]),
            iam.PolicyStatement(
                actions=[
                    "logs:CreateLogGroup",
                    "logs:CreateLogStream",
                    "logs:PutLogEvents",
                ],
                resources=["arn:aws:logs:*:*:/aws-glue/*"],
            ),
        ],
    )
    self.attach_inline_policy(policy)
def __init__(self, scope: Construct, id: str, context: "Context",
             team_context: "TeamContext", parameters: Dict[str, Any]) -> None:
    super().__init__(
        scope=scope,
        id=id,
        stack_name=id,
        env=Environment(account=context.account_id, region=context.region),
    )
    Tags.of(scope=cast(IConstruct, self)).add(
        key="Env", value=f"orbit-{context.name}")
    if team_context.eks_pod_role_arn is None:
        raise ValueError("Pod Role arn required")
    team_role = iam.Role.from_role_arn(
        scope=self,
        id="team-role",
        role_arn=team_context.eks_pod_role_arn,
        mutable=True)
    team_role.attach_inline_policy(policy=iam.Policy(
        scope=self,
        id="emr_on_eks",
        policy_name="emr_on_eks",
        statements=[
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "emr-containers:StartJobRun",
                    "emr-containers:ListJobRuns",
                    "emr-containers:DescribeJobRun",
                    "emr-containers:CancelJobRun",
                    "emr-containers:TagResource",
                ],
                resources=[parameters.get("virtual_arn", "*")],
            ),
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "logs:*",
                ],
                resources=[
                    f"arn:aws:logs:{context.region}:{context.account_id}:log-group:/orbit/emr/*",
                    f"arn:aws:logs:{context.region}:{context.account_id}:log-group:/orbit/emr/*:log-stream:*",
                ],
            ),
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "emr-containers:Get*",
                    "emr-containers:Describe*",
                    "emr-containers:List*",
                    "elasticmapreduce:CreatePersistentAppUI",
                    "elasticmapreduce:DescribePersistentAppUI",
                    "elasticmapreduce:GetPersistentAppUIPresignedURL",
                ],
                resources=["*"],
            ),
        ],
    ))
def ecr_policy_document(scope, id, repo_arn):
    ecr_policy = iam.Policy(
        scope,
        id,
        statements=[
            iam.PolicyStatement(
                actions=[
                    "ecr:BatchCheckLayerAvailability",
                    "ecr:GetDownloadUrlForLayer",
                    "ecr:DescribeRepositories",
                    "ecr:DescribeImages",
                    "ecr:BatchGetImage",
                ],
                resources=[repo_arn],
            ),
            iam.PolicyStatement(
                actions=[
                    "ecr:GetAuthorizationToken",
                ],
                # GetAuthorizationToken cannot be bound to resources other than *
                resources=["*"],
            ),
        ],
    )
    # add a suppression for the * resource
    ecr_policy.node.default_child.cfn_options.metadata = suppress_ecr_policy()
    return ecr_policy
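# Usage sketch (hypothetical `repo` and role names): grant a role pull access
# to a single repository via the helper above.
pull_role = iam.Role(
    self, "ImagePullRole",
    assumed_by=iam.ServicePrincipal("sagemaker.amazonaws.com"))
ecr_policy = ecr_policy_document(self, "EcrPullPolicy", repo.repository_arn)
ecr_policy.attach_to_role(pull_role)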
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # Create the IAM User to be the Data Analyst
    iam.User(
        self,
        id="datalake_user",
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonAthenaFullAccess")
        ],
    ).attach_inline_policy(
        iam.Policy(
            self,
            id="DatalakeUserBasic",
            statements=[
                iam.PolicyStatement(
                    actions=[
                        "lakeformation:GetDataAccess",
                        "glue:GetTable",
                        "glue:GetTables",
                        "glue:SearchTables",
                        "glue:GetDatabase",
                        "glue:GetDatabases",
                        "glue:GetPartitions",
                    ],
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                )
            ],
        ))

    # Create an Amazon S3 Bucket for the Data Lake
    _s3.Bucket(self, "cc-hiroga-datalake-cloudtrail")
def create_lambda_build_image(self) -> Resource:
    """Lambda that triggers the CodeBuild project which builds the inference
    app's Docker image for the Greengrass component.

    Returns:
        Resource: lambda
    """
    lambdaFn_name = self.get_lambda_name("build_image")
    role_name = self.get_role_name("build_image")
    lambda_role = aws_iam.Role(
        self,
        id=role_name,
        assumed_by=aws_iam.ServicePrincipal("lambda.amazonaws.com"),
        role_name=role_name,
        path="/service-role/",
        managed_policies=[
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AWSLambdaBasicExecutionRole"),
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                "AWSCodeBuildDeveloperAccess")
        ])
    lambda_role.attach_inline_policy(
        aws_iam.Policy(
            self,
            "AllowDynamoDBAccess",
            document=aws_iam.PolicyDocument(statements=[
                aws_iam.PolicyStatement(actions=[
                    "dynamodb:PutItem", "dynamodb:GetItem",
                    "dynamodb:UpdateItem"
                ],
                                        resources=[self._table.table_arn])
            ])))
    lambdaFn_path = self.get_lambda_path("build_image")
    lambdaFn = aws_lambda.Function(
        self,
        id=lambdaFn_name,
        function_name=lambdaFn_name,
        code=aws_lambda.AssetCode(path=lambdaFn_path),
        handler="lambda_handler.handler",
        timeout=Duration.seconds(10),
        runtime=aws_lambda.Runtime.PYTHON_3_9,
        description="Builds the Docker image for the component",
        role=lambda_role,
        environment={
            "TABLE_NAME": self._table.table_name,
            "CODEBUILD_PROJECT_NAME":
                self._docker_image_buildproject.project_name,
            "COMPONENT_IMAGE_REPOSITORY":
                self._component_ecr.repository_name,
            "COMPONENT_APP_SOURCE_REPOSITORY":
                self._component_source_repository.repository_clone_url_grc,
            "COMPONENT_BASE_IMAGE_REPOSITORY":
                self._component_base_ecr.repository_uri
        })
    self._table.grant_read_write_data(lambdaFn)
    return lambdaFn
def __init__(
    self,
    scope: core.Construct,
    data_lake_raw: BaseDataLakeBucket,
    data_lake_processed: BaseDataLakeBucket,
    **kwargs
) -> None:
    self.deploy_env = data_lake_raw.deploy_env
    self.data_lake_raw = data_lake_raw
    self.data_lake_processed = data_lake_processed
    super().__init__(
        scope,
        id=f"iam-{self.deploy_env.value}-redshift-spectrum-role",
        assumed_by=iam.ServicePrincipal("redshift.amazonaws.com"),
        description="Role to allow Redshift to access data lake using spectrum",
    )
    policy = iam.Policy(
        scope,
        id=f"iam-{self.deploy_env.value}-redshift-spectrum-policy",
        policy_name=f"iam-{self.deploy_env.value}-redshift-spectrum-policy",
        statements=[
            iam.PolicyStatement(actions=["glue:*", "athena:*"],
                                resources=["*"]),
            iam.PolicyStatement(
                actions=["s3:Get*", "s3:List*", "s3:Put*"],
                resources=[
                    self.data_lake_raw.bucket_arn,
                    f"{self.data_lake_raw.bucket_arn}/*",
                    self.data_lake_processed.bucket_arn,
                    f"{self.data_lake_processed.bucket_arn}/*",
                ],
            ),
        ],
    )
    self.attach_inline_policy(policy)
def _create_apigw_to_sqs_role(self):
    _role = _iam.Role(
        self,
        'ApiGwV2ToSqsRole',
        assumed_by=_iam.ServicePrincipal('apigateway.amazonaws.com'))
    _role.add_managed_policy(
        _iam.ManagedPolicy.from_managed_policy_arn(
            self,
            'ApiGwPushCwPolicy',
            'arn:aws:iam::aws:policy/service-role/'
            'AmazonAPIGatewayPushToCloudWatchLogs'
        )
    )
    _role.attach_inline_policy(
        _iam.Policy(self,
                    'ApiGwV2ToSqsInlinePolicy',
                    statements=[
                        _iam.PolicyStatement(
                            actions=[
                                'sqs:SendMessage',
                                'sqs:ReceiveMessage',
                                'sqs:PurgeQueue',
                                'sqs:DeleteMessage',
                            ],
                            resources=[self._queue.queue_arn])
                    ]))
    return _role
def __init__(self, scope: core.Construct, data_lake: DataLake,
             **kwargs) -> None:
    self.environment = data_lake.env.value
    super().__init__(
        scope,
        id=f'iam-{self.environment}-redshift-spectrum-role',
        assumed_by=iam.ServicePrincipal('redshift.amazonaws.com'),
        description='Role to allow Redshift to access data lake using spectrum',
    )
    buckets_arns = [
        data_lake.data_lake_raw_bucket.bucket_arn,
        data_lake.data_lake_processed_bucket.bucket_arn,
        data_lake.data_lake_curated_bucket.bucket_arn
    ]
    policy = iam.Policy(
        scope,
        id=f'iam-{self.environment}-redshift-spectrum-policy',
        policy_name=f'iam-{self.environment}-redshift-spectrum-policy',
        statements=[
            iam.PolicyStatement(actions=['glue:*', 'athena:*'],
                                resources=["*"]),
            iam.PolicyStatement(actions=['s3:Get*', 's3:List*', 's3:Put*'],
                                resources=buckets_arns +
                                [f'{arn}/*' for arn in buckets_arns])
        ])
    self.attach_inline_policy(policy)
def __init__(self, scope: _core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    ks_resource, cassandra_keyspace_arn = self.create_cassandra_keyspace(
        'cassandra_demo')
    table_resource, cassandra_table_arn = self.create_cassandra_table(
        table_name='country_cities',
        keyspace_name='cassandra_demo',
        keyspace_ref=ks_resource.ref,
        partitionkey_columns=[{
            'ColumnName': 'country',
            'ColumnType': 'TEXT',
        }],
        clustering_key_columns=[{
            'Column': {
                'ColumnName': 'city_name',
                'ColumnType': 'TEXT',
            },
            'OrderBy': 'ASC'
        }],
        regular_columns=[
            {
                'ColumnName': 'population',
                'ColumnType': 'INT'
            },
        ],
    )

    user = _iam.User(self, 'CassandraDemoUser', user_name='CassandraDemoUser')
    policy = _iam.Policy(self, 'CassandraFullDataAccess')
    policy.add_statements(
        _iam.PolicyStatement(
            resources=[cassandra_table_arn],
            actions=['cassandra:Select', 'cassandra:Modify']))
    policy.attach_to_user(user)

    secrets = _secretsmanager.Secret(self, 'cassandra_demo_creds',
                                     secret_name='cassandra_demo_creds')

    code = _lambda.Code.asset('lambda/.dist/lambda.zip')
    cassandra_function = _lambda.Function(
        self,
        'cassandra-demo',
        function_name='cassandra-demo',
        runtime=_lambda.Runtime.PYTHON_3_6,
        memory_size=1024,
        code=code,
        handler='demo_handler.handler',
        tracing=_lambda.Tracing.ACTIVE,
        environment={'CASSANDRA_CREDS': secrets.secret_arn},
    )
    secrets.grant_read(cassandra_function)

    api = _apigateway.LambdaRestApi(self, 'cassandra-demo-api',
                                    handler=cassandra_function)
def create_iam(self, stream_arn, user_name, policy_name):
    user = iam.User(
        self,
        "iam_user_stock_stream",
        user_name=user_name
    )
    policy = iam.Policy(
        self,
        "iam_policy_stock_stream",
        policy_name=policy_name,
        statements=[
            iam.PolicyStatement(
                resources=[stream_arn],
                actions=[
                    "kinesis:DescribeStream",
                    "kinesis:PutRecord",
                    "kinesis:PutRecords",
                    "kinesis:GetShardIterator",
                    "kinesis:GetRecords",
                    "kinesis:ListShards",
                    "kinesis:DescribeStreamSummary",
                    "kinesis:RegisterStreamConsumer"
                ],
            ),
            iam.PolicyStatement(
                resources=[stream_arn + "/*"],
                actions=[
                    "kinesis:SubscribeToShard",
                    "kinesis:DescribeStreamConsumer"
                ],
            ),
            iam.PolicyStatement(
                # TODO: narrow this down once the DynamoDB table exists; wildcard for now
                resources=["*"],
                actions=["dynamodb:*"],
            ),
            iam.PolicyStatement(
                resources=["*"],
                actions=["cloudwatch:PutMetricData"],
            )
        ],
        users=[user]
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    sageMakerPrincipal = iam.ServicePrincipal('sagemaker.amazonaws.com')

    role = iam.Role(
        self,
        'WorkshopRole',
        assumed_by=sageMakerPrincipal,
        role_name='amazon-sagemaker-in-practice-workshop-role'
    )

    # from_aws_managed_policy_name() expects the policy name, not the full ARN
    role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'AmazonSageMakerFullAccess'))

    participants_group = iam.Group(self, 'WorkshopParticipantsGroup')
    policy = iam.Policy(self, 'WorkshopParticipantsPolicy')

    permissions = [
        "sagemaker:*", "ecr:*", "cloudwatch:*", "logs:*",
        "s3:GetBucketLocation", "s3:ListAllMyBuckets",
        "iam:ListRoles", "iam:GetRole"
    ]

    defaultStatement = iam.PolicyStatement(effect=iam.Effect.ALLOW)
    defaultStatement.add_all_resources()
    defaultStatement.add_actions(*permissions)

    value = {'iam:PassedToService': sageMakerPrincipal.to_string()}

    passRole = iam.PolicyStatement(effect=iam.Effect.ALLOW)
    passRole.add_all_resources()
    passRole.add_actions("iam:PassRole")
    passRole.add_condition("StringEquals", value)

    policy.add_statements(defaultStatement, passRole)
    participants_group.attach_inline_policy(policy)

    existing_bucket_arn = 'arn:aws:s3:::existing-bucket-for-workshop'
    data_source = s3.Bucket.from_bucket_arn(self, 'DataSourceBucket',
                                            existing_bucket_arn)
    data_source.grant_read(participants_group)

    amount = kwargs.get("env").get("participants_count")
    password = kwargs.get("env").get("password")

    ParticipantsConstruct(
        self,
        "WorkshopParticipantsConstruct",
        num=amount,
        password=password,
        group=participants_group
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)
    bucket = get_bucket(self, f"{id}-bucket", core.Duration.days(amount=7))
    cluster, vpc = get_fargate_cluster(self, id)
    mem_limit = "512"
    task = get_fargate_task(self, id, mem_limit)
    aws_iam.Policy(
        self,
        f"{id}-bucket-policy",
        roles=[task.task_role],
        statements=[
            aws_iam.PolicyStatement(actions=["s3:PutObject"],
                                    resources=[f"{bucket.bucket_arn}/*"]),
        ],
    )
    get_fargate_container(
        self,
        id,
        task,
        mem_limit,
        {
            'S3_BUCKET': bucket.bucket_name,
            'MONGODB_URI': env['MONGODB_URI'],
        },
    )
    cronjob = aws_events.Rule(
        self,
        f"{id}-scheduled-event",
        enabled=True,
        schedule=aws_events.Schedule.cron(minute="0", hour="0"),  # pylint: disable=no-value-for-parameter
    )
    cronjob.add_target(
        aws_events_targets.EcsTask(
            cluster=cluster,
            task_definition=task,
            subnet_selection=aws_ec2.SubnetSelection(
                subnets=vpc.public_subnets),
            security_group=aws_ec2.SecurityGroup.from_security_group_id(
                self, f'{id}-default-security-group',
                vpc.vpc_default_security_group),
        ))
    buglink = "https://github.com/aws/aws-cdk/issues/9233"
    core.CfnOutput(
        self,
        f"{id}-fixme",
        value=f"FIXME: set cronjob 'Auto assign public IP' when this is fixed: {buglink}"
    )
def _create_train_step(self):
    stage = self.pipeline.add_stage(stage_name=f"{self.name_prefix}-stage")

    role = iam.Role(
        self,
        "Role",
        assumed_by=iam.ServicePrincipal("codebuild.amazonaws.com"),
        description="Role for CodeBuild",
        role_name=f"{self.name_prefix}-codebuild-role",
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonEC2ContainerRegistryFullAccess"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AWSStepFunctionsFullAccess"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AWSLambdaVPCAccessExecutionRole"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "SecretsManagerReadWrite"),
        ],
    )
    policy = iam.Policy(self, "PassRolePolicy")
    policy.document.add_statements(
        iam.PolicyStatement(
            actions=["iam:PassRole"],
            resources=[f"arn:aws:iam::{Stack.of(self).account}:role/*"]))
    role.attach_inline_policy(policy)

    build_spec = codebuild.BuildSpec.from_source_filename('buildspec.yml')
    project = codebuild.PipelineProject(
        self,
        "TrainingStepProject",
        build_spec=build_spec,
        environment=codebuild.BuildEnvironment(
            build_image=codebuild.LinuxBuildImage.STANDARD_5_0,
            privileged=True),
        role=role,
        security_groups=[self.security_group],
        subnet_selection=self.subnet_selection,
        vpc=self.vpc)

    action = codepipeline_actions.CodeBuildAction(
        action_name=f"{self.name_prefix}-training-action",
        project=project,
        input=self.source_output,
        environment_variables={
            "EXEC_ID": codebuild.BuildEnvironmentVariable(
                value='#{codepipeline.PipelineExecutionId}'),
            "SFN_WORKFLOW_NAME":
                codebuild.BuildEnvironmentVariable(value=self.sfn_name)
        },
        variables_namespace="trainStep",
    )
    stage.add_action(action)
def add_service_account(self, cluster, name, namespace):
    """
    Workaround to add the Helm-managed role to a service account.
    """
    # create the IAM role, trusting the cluster's OIDC provider
    conditions = core.CfnJson(
        self,
        'ConditionJson',
        value={
            "%s:aud" % cluster.cluster_open_id_connect_issuer:
                "sts.amazonaws.com",
            "%s:sub" % cluster.cluster_open_id_connect_issuer:
                "system:serviceaccount:%s:%s" % (namespace, name),
        },
    )
    principal = iam.OpenIdConnectPrincipal(
        cluster.open_id_connect_provider).with_conditions({
            "StringEquals": conditions,
        })
    role = iam.Role(self, 'ServiceAccountRole', assumed_by=principal)

    # create the policy for the service account
    statements = []
    with open('backend/iam_policy.json') as f:
        data = json.load(f)
        for s in data["Statement"]:
            statements.append(iam.PolicyStatement.from_json(s))
    policy = iam.Policy(self, "LBControllerPolicy", statements=statements)
    policy.attach_to_role(role)

    return eks.KubernetesManifest(
        self,
        "ServiceAccount",
        cluster=cluster,
        manifest=[{
            "apiVersion": "v1",
            "kind": "ServiceAccount",
            "metadata": {
                "name": name,
                "namespace": namespace,
                "labels": {
                    "app.kubernetes.io/name": name,
                    "app.kubernetes.io/managed-by": "Helm",
                },
                "annotations": {
                    "eks.amazonaws.com/role-arn": role.role_arn,
                    "meta.helm.sh/release-name": name,
                    "meta.helm.sh/release-namespace": namespace,
                },
            },
        }],
    )
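# Usage sketch (hypothetical `cluster` variable): pre-create the annotated
# service account that a Helm release such as the AWS Load Balancer Controller
# will adopt, then make the chart depend on the returned manifest.
sa_manifest = self.add_service_account(
    cluster, "aws-load-balancer-controller", "kube-system")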
def create_eks(self, vpc):
    # create an EKS cluster with an amd64 node group
    cluster = eks.Cluster(
        self,
        "EKS",
        vpc=vpc,
        version=eks.KubernetesVersion.V1_18,
        default_capacity_instance=ec2.InstanceType("m5.large"),
        default_capacity=1)

    # add an arm64/Graviton node group
    cluster.add_nodegroup_capacity(
        "graviton",
        desired_size=1,
        instance_type=ec2.InstanceType("m6g.large"),
        nodegroup_name="graviton",
        node_role=cluster.default_nodegroup.role)

    # add Secrets Manager access to the EKS node role
    cluster.default_nodegroup.role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            "SecretsManagerReadWrite"))

    # create the service account
    sa = cluster.add_service_account("LBControllerServiceAccount",
                                     name="aws-load-balancer-controller",
                                     namespace="kube-system")
    sa_annotated = self.add_helm_annotation(cluster, sa)

    # create the policy for the service account
    statements = []
    with open('backend/iam_policy.json') as f:
        data = json.load(f)
        for s in data["Statement"]:
            statements.append(iam.PolicyStatement.from_json(s))
    policy = iam.Policy(self, "LBControllerPolicy", statements=statements)
    policy.attach_to_role(sa.role)

    # add the Helm chart
    ingress = cluster.add_helm_chart(
        "LBIngress",
        chart="aws-load-balancer-controller",
        release="aws-load-balancer-controller",
        repository="https://aws.github.io/eks-charts",
        namespace="kube-system",
        values={
            "clusterName": cluster.cluster_name,
            "serviceAccount.name": "aws-load-balancer-controller",
            "serviceAccount.create": "false"
        })
    ingress.node.add_dependency(sa_annotated)
    return cluster
def __init__(self, app: core.App, id: str, **kwargs) -> None:
    super().__init__(app, id, **kwargs)
    self.platform_resources = ImportedResources(self, self.stack_name)

    self.rds_security_group = aws_ec2.SecurityGroup(
        self,
        "rds-security-group",
        vpc=self.platform_resources.vpc,
        allow_all_outbound=True)
    self.rds_security_group.add_ingress_rule(
        peer=aws_ec2.Peer.ipv4(self.platform_resources.vpc.vpc_cidr_block),
        connection=aws_ec2.Port.tcp(3306))

    my_secret = aws_secretsmanager.Secret.from_secret_name(
        self, "DBSecret", "support/octicketing/rds")

    self.rds = aws_rds.DatabaseInstance(
        self,
        "support-rds",
        database_name="support_db",
        instance_identifier='support-db',
        credentials=aws_rds.Credentials.from_secret(my_secret),
        engine=aws_rds.DatabaseInstanceEngine.mysql(
            version=aws_rds.MysqlEngineVersion.VER_5_6),
        vpc=self.platform_resources.vpc,
        port=3306,
        instance_type=aws_ec2.InstanceType.of(
            aws_ec2.InstanceClass.BURSTABLE3,
            aws_ec2.InstanceSize.MICRO,
        ),
        removal_policy=core.RemovalPolicy.DESTROY,
        security_groups=[self.rds_security_group],
        deletion_protection=False)

    self.db_auth_policy_stmt = aws_iam.PolicyStatement(
        effect=aws_iam.Effect.ALLOW,
        actions=["rds-db:connect"],
        resources=[self.rds.instance_arn])
    policy_doc = aws_iam.PolicyDocument()
    policy_doc.add_statements(self.db_auth_policy_stmt)
    self.db_auth_policy = aws_iam.Policy(
        self,
        'db-auth-policy',
        policy_name='RdsDbAuthPolicy',
        statements=[self.db_auth_policy_stmt])

    self.db_auth_role = aws_iam.Role(
        self,
        "db-auth-role",
        role_name='RdsDbAuthRole',
        assumed_by=aws_iam.ServicePrincipal('ec2.amazonaws.com'))
    self.db_auth_role.add_to_policy(self.db_auth_policy_stmt)
def create_lambda_check_image_status(self) -> Resource:
    """Lambda that checks the build status of the Docker image.

    Returns:
        Resource: lambda
    """
    lambdaFn_name = self.get_lambda_name("check_image_status")
    role_name = self.get_role_name("check_image_status")
    lambda_role = aws_iam.Role(
        self,
        id=role_name,
        assumed_by=aws_iam.ServicePrincipal("lambda.amazonaws.com"),
        role_name=role_name,
        path="/service-role/",
        managed_policies=[
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AWSLambdaBasicExecutionRole")
        ])
    lambda_role.attach_inline_policy(
        aws_iam.Policy(
            self,
            "AllowCodeBuildStatus",
            document=aws_iam.PolicyDocument(statements=[
                aws_iam.PolicyStatement(
                    actions=["codebuild:BatchGetBuilds"],
                    resources=[self._docker_image_buildproject.project_arn]),
                aws_iam.PolicyStatement(actions=[
                    "dynamodb:PutItem", "dynamodb:GetItem",
                    "dynamodb:UpdateItem"
                ],
                                        resources=[self._table.table_arn])
            ])))
    lambdaFn_path = self.get_lambda_path("check_image_status")
    lambdaFn = aws_lambda.Function(
        self,
        id=lambdaFn_name,
        function_name=lambdaFn_name,
        code=aws_lambda.AssetCode(path=lambdaFn_path),
        handler="lambda_handler.handler",
        timeout=Duration.seconds(10),
        runtime=aws_lambda.Runtime.PYTHON_3_9,
        description="Checks the build result of the component image",
        role=lambda_role,
        environment={"TABLE_NAME": self._table.table_name})
    return lambdaFn
def __init__(self, scope: core.Construct, id: str, bucket: s3.Bucket,
             capture_bucket: s3.Bucket, write_topic: sns.Topic,
             **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # create an IAM user "RaspberryPiUser"
    raspberry_pi_user = iam.User(
        self,
        "RaspberryPiUser",
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AWSCodeCommitPowerUser"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AWSIoTFullAccess")
        ])

    # create an IAM policy statement for "WriteToS3Policy";
    # s3:PutObject applies to objects, so target the object ARNs, not the buckets
    write_to_s3_policy_statement = iam.PolicyStatement(
        actions=["s3:PutObject"],
        resources=[
            f"{bucket.bucket_arn}/*",
            f"{capture_bucket.bucket_arn}/*"
        ])

    # create an IAM policy "WriteToS3Policy"
    write_to_s3_policy = iam.Policy(
        self,
        "WriteToS3Policy",
        statements=[write_to_s3_policy_statement],
        users=[raspberry_pi_user])

    # create an IAM policy statement to allow RaspberryPiUser to publish to the SNS topic
    publish_to_sns_topic_policy_statement = iam.PolicyStatement(
        actions=["sns:Publish"], resources=[write_topic.topic_arn])

    # create an IAM policy "PublishToSnsTopic"
    publish_to_sns_topic_policy = iam.Policy(
        self,
        "PublishToSnsTopicPolicy",
        statements=[publish_to_sns_topic_policy_statement],
        users=[raspberry_pi_user])
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # IAM policy statements for Firehose
    es_policy_statement = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        actions=[
            "es:DescribeElasticsearchDomain",
            "es:DescribeElasticsearchDomains",
            "es:DescribeElasticsearchDomainConfig",
            "es:ESHttpPost",
            "es:ESHttpPut",
        ],
        resources=[
            "*",
        ],
    )
    es_policy_doc = iam.PolicyDocument(statements=[es_policy_statement])

    s3_policy_statement = iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                              actions=["s3:*"],
                                              resources=["*"])
    s3_policy_doc = iam.PolicyDocument(statements=[s3_policy_statement])

    # creating the role
    self.firehose_role = iam.Role(
        self,
        id="ddbqiRole",
        role_name=constants["FIREHOSE_ROLE"],
        assumed_by=iam.ServicePrincipal("firehose.amazonaws.com"),
    )

    # IAM policy; passing roles= here already attaches it to the role
    firehose_policy = iam.Policy(
        self,
        "firehose_policy",
        policy_name="ddbqiFhPolicy",
        statements=[es_policy_statement, s3_policy_statement],
        roles=[self.firehose_role])

    # equivalent alternatives for attaching the statements or the policy:
    # firehose_role.add_to_policy(es_policy_statement)
    # firehose_role.add_to_policy(s3_policy_statement)
    # firehose_policy.attach_to_role(firehose_role)

    core.CfnOutput(self,
                   'RoleArn',
                   export_name="rolearn",
                   value=self.firehose_role.role_arn,
                   description="firehose role arn")
    core.Tags.of(self.firehose_role).add("project", constants["PROJECT_TAG"])